summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNeela Chithirala <chithiralan@google.com>2022-03-03 05:17:38 +0000
committerNeela Chithirala <chithiralan@google.com>2022-03-07 05:11:37 +0000
commit6d71338807ebff13916d2285d67ce59f261eab3e (patch)
treedd3bbbaab5286ec315807569c72fe790f8fea61d
parent49087428c8fea8eb57a4f57f4fe83e0d2f26b113 (diff)
downloadgs201-6d71338807ebff13916d2285d67ce59f261eab3e.tar.gz
Merge branch 'gs201-release' to android13-gs-pixel-5.10
* gs201-release: gxp: create own queue for power state transition Bug: 221187219 gxp: Fix checkpatch errors gxp: lower the default power state Bug: 201600514 gxp: support memory power state operation Bug: 201600514 gxp: change power state asynchronously Bug: 221187219 gxp: Support for mapping and unmapping dma-bufs Bug: 177224744 gxp: Remove extra call to gxp_pm_init() on probe gxp: support power state voting by wakelock IOCTL Bug: 201600514 gxp: add firmware authentication support Bug: 218949590 GitOrigin-RevId: ae2f794c392b0357bbad0f6b84c71896b9e61185 Change-Id: I42bf2d5a834700059b4e8f145b8a78a27aa7b668 Signed-off-by: Neela Chithirala <chithiralan@google.com> Change-Id: Id2ea88150f661804c06455924b799640c38b0480
-rw-r--r--Kconfig1
-rw-r--r--Makefile3
-rw-r--r--gxp-client.h18
-rw-r--r--gxp-debug-dump.c18
-rw-r--r--gxp-debug-dump.h6
-rw-r--r--gxp-dma-iommu-gem5.c22
-rw-r--r--gxp-dma-iommu.c127
-rw-r--r--gxp-dma-rmem.c19
-rw-r--r--gxp-dma.h32
-rw-r--r--gxp-dmabuf.c128
-rw-r--r--gxp-dmabuf.h39
-rw-r--r--gxp-firmware.c114
-rw-r--r--gxp-host-device-structs.h2
-rw-r--r--gxp-internal.h5
-rw-r--r--gxp-mailbox.c2
-rw-r--r--gxp-mapping.c10
-rw-r--r--gxp-mapping.h5
-rw-r--r--gxp-mb-notification.c2
-rw-r--r--gxp-platform.c172
-rw-r--r--gxp-pm.c261
-rw-r--r--gxp-pm.h86
-rw-r--r--gxp-tmp.h2
-rw-r--r--gxp-vd.c2
-rw-r--r--gxp.h70
-rw-r--r--include/soc/google/exynos_pm_qos.h64
25 files changed, 1093 insertions, 117 deletions
diff --git a/Kconfig b/Kconfig
index 0ebee89..1673e66 100644
--- a/Kconfig
+++ b/Kconfig
@@ -6,6 +6,7 @@ config GXP
tristate "Device driver for GXP"
default m
select GENERIC_ALLOCATOR
+ select DMA_SHARED_BUFFER
help
This driver supports the GXP device. Say Y if you want to
include this driver in the kernel.
diff --git a/Makefile b/Makefile
index 8076f25..e4d2063 100644
--- a/Makefile
+++ b/Makefile
@@ -10,6 +10,7 @@ gxp-objs += \
gxp-client.o \
gxp-debug-dump.o \
gxp-debugfs.o \
+ gxp-dmabuf.o \
gxp-doorbell.o \
gxp-firmware.o \
gxp-firmware-data.o \
@@ -81,7 +82,7 @@ ccflags-y += -DCONFIG_GXP_$(GXP_PLATFORM)
KBUILD_OPTIONS += CONFIG_GXP=m
ifdef CONFIG_GXP_TEST
-subdir-ccflags-y += -Wall -Werror
+subdir-ccflags-y += -Wall -Werror -I$(srctree)/drivers/gxp/include
obj-y += unittests/
include $(srctree)/drivers/gxp/unittests/Makefile.include
$(call include_test_path, $(gxp-objs))
diff --git a/gxp-client.h b/gxp-client.h
index 97ba489..ae85f3b 100644
--- a/gxp-client.h
+++ b/gxp-client.h
@@ -15,25 +15,25 @@
/* Holds state belonging to a client */
struct gxp_client {
- struct gxp_dev *gxp;
+ struct gxp_dev *gxp;
/*
* Protects all state of this client instance.
* Any operation that requires a client hold a particular wakelock must
* lock this semaphore for reading for the duration of that operation.
*/
- struct rw_semaphore semaphore;
+ struct rw_semaphore semaphore;
- bool has_block_wakelock;
- bool has_vd_wakelock;
+ bool has_block_wakelock;
+ bool has_vd_wakelock;
/* Value is one of the GXP_POWER_STATE_* values from gxp.h. */
- uint requested_power_state;
+ uint requested_power_state;
/* Value is one of the MEMORY_POWER_STATE_* values from gxp.h. */
- uint requested_memory_power_state;
+ uint requested_memory_power_state;
- struct gxp_virtual_device *vd;
- bool tpu_mbx_allocated;
- struct gxp_tpu_mbx_desc mbx_desc;
+ struct gxp_virtual_device *vd;
+ bool tpu_mbx_allocated;
+ struct gxp_tpu_mbx_desc mbx_desc;
};
/*
diff --git a/gxp-debug-dump.c b/gxp-debug-dump.c
index 87d5539..bbc4635 100644
--- a/gxp-debug-dump.c
+++ b/gxp-debug-dump.c
@@ -247,8 +247,8 @@ static void gxp_get_common_dump(struct gxp_dev *gxp)
#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
static void gxp_send_to_sscd(struct gxp_dev *gxp, void *segs, int seg_cnt,
- const char *info) {
-
+ const char *info)
+{
struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
struct sscd_platform_data *pdata =
(struct sscd_platform_data *)mgr->sscd_pdata;
@@ -337,11 +337,10 @@ static void gxp_handle_debug_dump(struct gxp_dev *gxp, uint32_t core_id)
for (i = 0; i < GXP_NUM_CORE_SEGMENTS; i++)
core_dump_header->seg_header[i].valid = 0;
-
- return;
}
-static void gxp_free_segments(struct gxp_dev *gxp) {
+static void gxp_free_segments(struct gxp_dev *gxp)
+{
#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
int core_id;
@@ -351,7 +350,8 @@ static void gxp_free_segments(struct gxp_dev *gxp) {
kfree(gxp->debug_dump_mgr->common_dump);
}
-static int gxp_init_segments(struct gxp_dev *gxp) {
+static int gxp_init_segments(struct gxp_dev *gxp)
+{
#if !IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
return 0;
#else
@@ -384,7 +384,8 @@ err_out:
#endif
}
-static void gxp_handle_dram_dump(struct gxp_dev *gxp, uint32_t core_id) {
+static void gxp_handle_dram_dump(struct gxp_dev *gxp, uint32_t core_id)
+{
struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
struct gxp_core_dump_header *core_dump_header =
&mgr->core_dump->core_dump_header[core_id];
@@ -407,7 +408,8 @@ static void gxp_handle_dram_dump(struct gxp_dev *gxp, uint32_t core_id) {
}
static bool gxp_is_segment_valid(struct gxp_dev *gxp, uint32_t core_id,
- int seg_idx) {
+ int seg_idx)
+{
struct gxp_core_dump *core_dump;
struct gxp_core_dump_header *core_dump_header;
struct gxp_seg_header *seg_header;
diff --git a/gxp-debug-dump.h b/gxp-debug-dump.h
index 453c045..b1905b7 100644
--- a/gxp-debug-dump.h
+++ b/gxp-debug-dump.h
@@ -20,9 +20,9 @@
#define GXP_NUM_COMMON_SEGMENTS 2
#define GXP_NUM_CORE_SEGMENTS 8
#define GXP_CORE_DRAM_SEGMENT_IDX 7
-#define GXP_DEBUG_DUMP_CORE_SEGMENT_IDX_START GXP_NUM_COMMON_SEGMENTS + 1
-#define GXP_DEBUG_DUMP_DRAM_SEGMENT_IDX GXP_DEBUG_DUMP_CORE_SEGMENT_IDX_START \
- + GXP_CORE_DRAM_SEGMENT_IDX
+#define GXP_DEBUG_DUMP_CORE_SEGMENT_IDX_START (GXP_NUM_COMMON_SEGMENTS + 1)
+#define GXP_DEBUG_DUMP_DRAM_SEGMENT_IDX \
+ (GXP_DEBUG_DUMP_CORE_SEGMENT_IDX_START + GXP_CORE_DRAM_SEGMENT_IDX)
#define GXP_SEG_HEADER_NAME_LENGTH 32
#define GXP_Q7_ICACHE_SIZE 131072 /* I-cache size in bytes */
diff --git a/gxp-dma-iommu-gem5.c b/gxp-dma-iommu-gem5.c
index 4c691f6..321b560 100644
--- a/gxp-dma-iommu-gem5.c
+++ b/gxp-dma-iommu-gem5.c
@@ -119,9 +119,9 @@ int gxp_dma_map_resources(struct gxp_dev *gxp)
if (ret)
goto err;
/*
- * Firmware expects to access the sync barriers at a separate
- * address, lower than the rest of the AURORA_TOP registers.
- */
+ * Firmware expects to access the sync barriers at a separate
+ * address, lower than the rest of the AURORA_TOP registers.
+ */
ret = iommu_map(mgr->default_domain, GXP_IOVA_SYNC_BARRIERS,
gxp->regs.paddr + SYNC_BARRIERS_TOP_OFFSET,
SYNC_BARRIERS_SIZE, IOMMU_READ | IOMMU_WRITE);
@@ -497,3 +497,19 @@ void gxp_dma_sync_sg_for_device(struct gxp_dev *gxp, struct scatterlist *sg,
/* Syncing is not domain specific. Just call through to DMA API */
dma_sync_sg_for_device(gxp->dev, sg, nents, direction);
}
+
+struct sg_table *
+gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
+ struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ return dma_buf_map_attachment(attachment, direction);
+}
+
+void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
+ struct dma_buf_attachment *attachment,
+ struct sg_table *sgt,
+ enum dma_data_direction direction)
+{
+ dma_buf_unmap_attachment(attachment, sgt, direction);
+}
diff --git a/gxp-dma-iommu.c b/gxp-dma-iommu.c
index 05d76be..77b9d31 100644
--- a/gxp-dma-iommu.c
+++ b/gxp-dma-iommu.c
@@ -542,7 +542,7 @@ void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
struct sg_table *sgt;
dma_addr_t daddr;
int core;
- int ret;
+ ssize_t size_mapped;
size = size < PAGE_SIZE ? PAGE_SIZE : size;
@@ -566,9 +566,16 @@ void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
if (!(core_list & BIT(core)))
continue;
- ret = iommu_map_sg(mgr->core_domains[core], daddr, sgt->sgl,
- sgt->nents, IOMMU_READ | IOMMU_WRITE);
- if (ret != size)
+ /*
+ * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
+ * `ssize_t` to encode errors that earlier versions throw out.
+ * Explicitly cast here for backwards compatibility.
+ */
+ size_mapped =
+ (ssize_t)iommu_map_sg(mgr->core_domains[core], daddr,
+ sgt->sgl, sgt->orig_nents,
+ IOMMU_READ | IOMMU_WRITE);
+ if (size_mapped != size)
goto err;
}
@@ -789,6 +796,7 @@ int gxp_dma_map_sg(struct gxp_dev *gxp, uint core_list, struct scatterlist *sg,
dma_addr_t daddr;
int prot = dma_info_to_prot(direction, 0, attrs);
int core;
+ ssize_t size_mapped;
/* Variables needed to cleanup if an error occurs */
struct scatterlist *s;
int i;
@@ -804,8 +812,14 @@ int gxp_dma_map_sg(struct gxp_dev *gxp, uint core_list, struct scatterlist *sg,
if (!(core_list & BIT(core)))
continue;
- if (!iommu_map_sg(mgr->core_domains[core], daddr, sg, nents,
- prot))
+ /*
+ * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
+ * `ssize_t` to encode errors that earlier versions throw out.
+ * Explicitly cast here for backwards compatibility.
+ */
+ size_mapped = (ssize_t)iommu_map_sg(mgr->core_domains[core],
+ daddr, sg, nents, prot);
+ if (size_mapped <= 0)
goto err;
}
@@ -911,3 +925,104 @@ struct iommu_domain *gxp_dma_iommu_get_core_domain(struct gxp_dev *gxp,
return mgr->core_domains[core];
}
#endif // CONFIG_GXP_TEST
+
+struct sg_table *
+gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
+ struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct gxp_dma_iommu_manager *mgr = container_of(
+ gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
+ struct sg_table *sgt;
+ int core;
+ int prot = dma_info_to_prot(direction, /*coherent=*/0, /*attrs=*/0);
+ ssize_t size_mapped;
+ int ret;
+ /* Variables needed to cleanup if an error occurs */
+ struct scatterlist *s;
+ int i;
+ size_t size = 0;
+
+ /* Map the attachment into the default domain */
+ sgt = dma_buf_map_attachment(attachment, direction);
+ if (IS_ERR(sgt)) {
+ dev_err(gxp->dev,
+ "DMA: dma_buf_map_attachment failed (ret=%ld)\n",
+ PTR_ERR(sgt));
+ return sgt;
+ }
+
+ /* Map the sgt into the aux domain of all specified cores */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (!(core_list & BIT(core)))
+ continue;
+
+ /*
+ * In Linux 5.15 and beyond, `iommu_map_sg()` returns a
+ * `ssize_t` to encode errors that earlier versions throw out.
+ * Explicitly cast here for backwards compatibility.
+ */
+ size_mapped =
+ (ssize_t)iommu_map_sg(mgr->core_domains[core],
+ sg_dma_address(sgt->sgl),
+ sgt->sgl, sgt->orig_nents, prot);
+ if (size_mapped <= 0) {
+ dev_err(gxp->dev,
+ "Failed to map dma-buf to core %d (ret=%ld)\n",
+ core, size_mapped);
+ /*
+ * Prior to Linux 5.15, `iommu_map_sg()` returns 0 for
+ * any failure. Return a generic IO error in this case.
+ */
+ ret = size_mapped == 0 ? -EIO : (int)size_mapped;
+ goto err;
+ }
+ }
+
+ return sgt;
+
+err:
+ for_each_sg(sgt->sgl, s, sgt->nents, i)
+ size += sg_dma_len(s);
+
+ for (core -= 1; core >= 0; core--)
+ iommu_unmap(mgr->core_domains[core], sg_dma_address(sgt->sgl),
+ size);
+ dma_buf_unmap_attachment(attachment, sgt, direction);
+
+ return ERR_PTR(ret);
+
+}
+
+void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
+ struct dma_buf_attachment *attachment,
+ struct sg_table *sgt,
+ enum dma_data_direction direction)
+{
+ struct gxp_dma_iommu_manager *mgr = container_of(
+ gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
+ struct scatterlist *s;
+ int i;
+ size_t size = 0;
+ int core;
+
+ /* Find the size of the mapping in IOVA-space */
+ for_each_sg(sgt->sgl, s, sgt->nents, i)
+ size += sg_dma_len(s);
+
+ /* Unmap the dma-buf from the aux domain of all specified cores */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (!(core_list & BIT(core)))
+ continue;
+
+ if (!iommu_unmap(mgr->core_domains[core],
+ sg_dma_address(sgt->sgl), size))
+ dev_warn(
+ gxp->dev,
+ "Failed to unmap dma-buf from core %d\n",
+ core);
+ }
+
+ /* Unmap the attachment from the default domain */
+ dma_buf_unmap_attachment(attachment, sgt, direction);
+}
diff --git a/gxp-dma-rmem.c b/gxp-dma-rmem.c
index fc2a2e4..578735e 100644
--- a/gxp-dma-rmem.c
+++ b/gxp-dma-rmem.c
@@ -594,3 +594,22 @@ void gxp_dma_sync_sg_for_device(struct gxp_dev *gxp, struct scatterlist *sg,
kunmap(sg_page(s));
}
}
+
+struct sg_table *
+gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
+ struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ dev_warn(gxp->dev, "%s not supported on systems without an IOMMU\n",
+ __func__);
+ return ERR_PTR(-ENOSYS);
+}
+
+void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
+ struct dma_buf_attachment *attachment,
+ struct sg_table *sgt,
+ enum dma_data_direction direction)
+{
+ dev_warn(gxp->dev, "%s not supported on systems without an IOMMU\n",
+ __func__);
+}
diff --git a/gxp-dma.h b/gxp-dma.h
index e8131e7..3d72f65 100644
--- a/gxp-dma.h
+++ b/gxp-dma.h
@@ -7,6 +7,7 @@
#ifndef __GXP_DMA_H__
#define __GXP_DMA_H__
+#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>
@@ -308,4 +309,35 @@ void gxp_dma_sync_sg_for_cpu(struct gxp_dev *gxp, struct scatterlist *sg,
void gxp_dma_sync_sg_for_device(struct gxp_dev *gxp, struct scatterlist *sg,
int nents, enum dma_data_direction direction);
+/**
+ * gxp_dma_map_dmabuf_attachment() - Create a mapping for a dma-buf
+ * @gxp: The GXP device to map the dma-buf for
+ * @core_list: A bitfield enumerating the physical cores the mapping is for
+ * @attachment: An attachment, representing the dma-buf, obtained from
+ * `dma_buf_attach()`
+ * @direction: DMA direction
+ *
+ * Return: A scatter-gather table describing the mapping of the dma-buf
+ * into the default IOMMU domain. Returns ERR_PTR on failure.
+ */
+struct sg_table *
+gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
+ struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction);
+
+/**
+ * gxp_dma_unmap_dmabuf_attachment() - Unmap a dma-buf
+ * @gxp: The GXP device the dma-buf was mapped for
+ * @core_list: A bitfield enumerating the physical cores the mapping is for
+ * @attachment: The attachment, representing the dma-buf, that was passed to
+ * `gxp_dma_map_dmabuf_attachment()` to create the mapping
+ * @sgt: The scatter-gather table returned by `gxp_dma_map_dmabuf_attachment()`
+ * when mapping this dma-buf
+ * @direction: DMA direction
+ */
+void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
+ struct dma_buf_attachment *attachment,
+ struct sg_table *sgt,
+ enum dma_data_direction direction);
+
#endif /* __GXP_DMA_H__ */
diff --git a/gxp-dmabuf.c b/gxp-dmabuf.c
new file mode 100644
index 0000000..5d0cf9b
--- /dev/null
+++ b/gxp-dmabuf.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for using dma-bufs.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include "gxp-dma.h"
+#include "gxp-dmabuf.h"
+
+struct gxp_dmabuf_mapping {
+ struct gxp_mapping mapping;
+ struct dma_buf *dmabuf;
+ struct dma_buf_attachment *attachment;
+ /*
+ * For normal mappings, the `sg_table` is embedded directly in the
+ * `gxp_mapping` and populated by `sg_alloc_table_from_pages()`.
+ * For dma-bufs however, a pointer to the `sg_table` is returned by
+ * `dma_buf_map_attachment()`.
+ *
+ * Rather than manage the memory of `gxp_mapping`'s `sg_table`
+ * independently so it can contain a pointer, dma-bufs store their
+ * `sg_table` pointer here and ignore `mapping->sgt`.
+ */
+ struct sg_table *sgt;
+};
+
+struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp, uint core_list, int fd,
+ u32 flags, enum dma_data_direction dir)
+{
+ struct dma_buf *dmabuf;
+ struct dma_buf_attachment *attachment;
+ struct sg_table *sgt;
+ struct gxp_dmabuf_mapping *dmabuf_mapping;
+ int ret = 0;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf)) {
+ dev_err(gxp->dev, "Failed to get dma-buf to map (ret=%ld)\n",
+ PTR_ERR(dmabuf));
+ return ERR_CAST(dmabuf);
+ }
+
+ attachment = dma_buf_attach(dmabuf, gxp->dev);
+ if (IS_ERR(attachment)) {
+ dev_err(gxp->dev, "Failed to attach dma-buf to map (ret=%ld)\n",
+ PTR_ERR(attachment));
+ ret = PTR_ERR(attachment);
+ goto err_attach;
+ }
+
+ sgt = gxp_dma_map_dmabuf_attachment(gxp, core_list, attachment, dir);
+ if (IS_ERR(sgt)) {
+ dev_err(gxp->dev,
+ "Failed to map dma-buf attachment (ret=%ld)\n",
+ PTR_ERR(sgt));
+ ret = PTR_ERR(sgt);
+ goto err_map_attachment;
+ }
+
+ dmabuf_mapping = kzalloc(sizeof(*dmabuf_mapping), GFP_KERNEL);
+ if (!dmabuf_mapping) {
+ ret = -ENOMEM;
+ goto err_alloc_mapping;
+ }
+
+ /* dma-buf mappings are indicated by a host_address of 0 */
+ dmabuf_mapping->mapping.host_address = 0;
+ dmabuf_mapping->mapping.core_list = core_list;
+ dmabuf_mapping->mapping.device_address = sg_dma_address(sgt->sgl);
+ dmabuf_mapping->mapping.dir = dir;
+ dmabuf_mapping->dmabuf = dmabuf;
+ dmabuf_mapping->attachment = attachment;
+ dmabuf_mapping->sgt = sgt;
+ ret = gxp_mapping_put(gxp, &dmabuf_mapping->mapping);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to store mapping for dma-buf (ret=%d)\n", ret);
+ goto err_put_mapping;
+ }
+
+ return &dmabuf_mapping->mapping;
+
+err_put_mapping:
+ kfree(dmabuf_mapping);
+err_alloc_mapping:
+ gxp_dma_unmap_dmabuf_attachment(gxp, core_list, attachment, sgt, dir);
+err_map_attachment:
+ dma_buf_detach(dmabuf, attachment);
+err_attach:
+ dma_buf_put(dmabuf);
+ return ERR_PTR(ret);
+}
+
+void gxp_dmabuf_unmap(struct gxp_dev *gxp, dma_addr_t device_address)
+{
+ struct gxp_dmabuf_mapping *dmabuf_mapping;
+ struct gxp_mapping *mapping;
+
+ /*
+ * Fetch and remove the internal mapping records.
+ * If host_address is not 0, the provided device_address belongs to a
+ * non-dma-buf mapping.
+ */
+ mapping = gxp_mapping_get(gxp, device_address);
+ if (IS_ERR_OR_NULL(mapping) || mapping->host_address) {
+ dev_warn(gxp->dev, "No dma-buf mapped for given IOVA\n");
+ return;
+ }
+
+ gxp_mapping_remove(gxp, mapping);
+
+ /* Unmap and detach the dma-buf */
+ dmabuf_mapping =
+ container_of(mapping, struct gxp_dmabuf_mapping, mapping);
+
+ gxp_dma_unmap_dmabuf_attachment(gxp, mapping->core_list,
+ dmabuf_mapping->attachment,
+ dmabuf_mapping->sgt, mapping->dir);
+ dma_buf_detach(dmabuf_mapping->dmabuf, dmabuf_mapping->attachment);
+ dma_buf_put(dmabuf_mapping->dmabuf);
+
+ kfree(dmabuf_mapping);
+}
diff --git a/gxp-dmabuf.h b/gxp-dmabuf.h
new file mode 100644
index 0000000..87ac7cc
--- /dev/null
+++ b/gxp-dmabuf.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for using dma-bufs.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+#ifndef __GXP_DMABUF_H__
+#define __GXP_DMABUF_H__
+
+#include <linux/dma-direction.h>
+#include <linux/types.h>
+
+#include "gxp-internal.h"
+#include "gxp-mapping.h"
+
+/**
+ * gxp_dmabuf_map() - Map a dma-buf for access by the specified physical cores
+ * @gxp: The GXP device to map the dma-buf for
+ * @core_list: A bitfield enumerating the physical cores the mapping is for
+ * @fd: A file descriptor for the dma-buf to be mapped
+ * @flags: The type of mapping to create; Currently unused
+ * @direction: DMA direction
+ *
+ * Return: The structure that was created and is being tracked to describe the
+ * mapping of the dma-buf. Returns ERR_PTR on failure.
+ */
+struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp, uint core_list, int fd,
+ u32 flags, enum dma_data_direction dir);
+
+/**
+ * gxp_dmabuf_unmap - Unmap a dma-buf previously mapped with `gxp_dmabuf_map()`
+ * @gxp: The GXP device the dma-buf was mapped for.
+ * @device_address: The IOVA the dma-buf was mapped to. Should be obtained from
+ * the `device_address` field of the `struct gxp_mapping`
+ * returned by `gxp_dmabuf_map()`
+ */
+void gxp_dmabuf_unmap(struct gxp_dev *gxp, dma_addr_t device_address);
+
+#endif /* __GXP_DMABUF_H__ */
diff --git a/gxp-firmware.c b/gxp-firmware.c
index 1557696..a9a7e5a 100644
--- a/gxp-firmware.c
+++ b/gxp-firmware.c
@@ -7,8 +7,10 @@
#include <linux/bitops.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/elf.h>
#include <linux/firmware.h>
+#include <linux/gsa/gsa_image_auth.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -34,19 +36,22 @@
#define Q7_ELF_FILE2 "gxp_fw_core2"
#define Q7_ELF_FILE3 "gxp_fw_core3"
+#define FW_HEADER_SIZE (0x1000)
+#define FW_IMAGE_TYPE_OFFSET (0x400)
+
static const struct firmware *fw[GXP_NUM_CORES];
static void __iomem *aurora_base;
static char *fw_elf[] = {Q7_ELF_FILE0, Q7_ELF_FILE1, Q7_ELF_FILE2,
Q7_ELF_FILE3};
-static int elf_load_segments(struct gxp_dev *gxp, const struct firmware *fw,
- u8 core)
+static int elf_load_segments(struct gxp_dev *gxp, const u8 *elf_data,
+ size_t size,
+ const struct gxp_mapped_resource *buffer)
{
struct elf32_hdr *ehdr;
struct elf32_phdr *phdr;
int i, ret = 0;
- const u8 *elf_data = fw->data;
ehdr = (struct elf32_hdr *)elf_data;
phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
@@ -76,9 +81,9 @@ static int elf_load_segments(struct gxp_dev *gxp, const struct firmware *fw,
if (!memsz)
continue;
- if (!((da >= (u32)gxp->fwbufs[core].daddr) &&
- ((da + memsz) <= ((u32)gxp->fwbufs[core].daddr +
- (u32)gxp->fwbufs[core].size - 1)))) {
+ if (!((da >= (u32)buffer->daddr) &&
+ ((da + memsz) <= ((u32)buffer->daddr +
+ (u32)buffer->size - 1)))) {
/*
* Some BSS data may be referenced from TCM, and can be
* skipped while loading
@@ -98,15 +103,15 @@ static int elf_load_segments(struct gxp_dev *gxp, const struct firmware *fw,
break;
}
- if (offset + filesz > fw->size) {
+ if (offset + filesz > size) {
dev_err(gxp->dev, "Truncated fw: need 0x%x avail 0x%zx\n",
- offset + filesz, fw->size);
+ offset + filesz, size);
ret = -EINVAL;
break;
}
/* grab the kernel address for this device address */
- ptr = gxp->fwbufs[core].vaddr + (da - gxp->fwbufs[core].daddr);
+ ptr = buffer->vaddr + (da - buffer->daddr);
if (!ptr) {
dev_err(gxp->dev, "Bad phdr: da 0x%llx mem 0x%x\n",
da, memsz);
@@ -128,6 +133,86 @@ static int elf_load_segments(struct gxp_dev *gxp, const struct firmware *fw,
return ret;
}
+/* TODO (b/220246540): remove after unsigned firmware support is phased out */
+static bool gxp_firmware_image_is_signed(const u8 *data)
+{
+ return data[FW_IMAGE_TYPE_OFFSET + 0] == 'D' &&
+ data[FW_IMAGE_TYPE_OFFSET + 1] == 'S' &&
+ data[FW_IMAGE_TYPE_OFFSET + 2] == 'P' &&
+ data[FW_IMAGE_TYPE_OFFSET + 3] == 'F';
+}
+
+static int
+gxp_firmware_load_authenticated(struct gxp_dev *gxp, const struct firmware *fw,
+ const struct gxp_mapped_resource *buffer)
+{
+ const u8 *data = fw->data;
+ size_t size = fw->size;
+ void *header_vaddr;
+ dma_addr_t header_dma_addr;
+ int ret;
+
+ /* TODO (b/220246540): remove after unsigned firmware support is phased out */
+ if (!gxp_firmware_image_is_signed(data)) {
+ dev_info(gxp->dev, "Loading unsigned firmware\n");
+ return elf_load_segments(gxp, data, size, buffer);
+ }
+
+ if (!gxp->gsa_dev) {
+ dev_warn(
+ gxp->dev,
+ "No GSA device available, skipping firmware authentication\n");
+ return elf_load_segments(gxp, data + FW_HEADER_SIZE,
+ size - FW_HEADER_SIZE, buffer);
+ }
+
+ if ((size - FW_HEADER_SIZE) > buffer->size) {
+ dev_err(gxp->dev, "Firmware image does not fit (%zu > %llu)\n",
+ size - FW_HEADER_SIZE, buffer->size);
+ return -EINVAL;
+ }
+
+ dev_dbg(gxp->dev, "Authenticating firmware\n");
+
+ /* Allocate coherent memory for the image header */
+ header_vaddr = dma_alloc_coherent(gxp->gsa_dev, FW_HEADER_SIZE,
+ &header_dma_addr, GFP_KERNEL);
+ if (!header_vaddr) {
+ dev_err(gxp->dev,
+ "Failed to allocate coherent memory for header\n");
+ return -ENOMEM;
+ }
+
+ /* Copy the header to GSA coherent memory */
+ memcpy(header_vaddr, data, FW_HEADER_SIZE);
+
+ /* Copy the firmware image to the carveout location, skipping the header */
+ memcpy_toio(buffer->vaddr, data + FW_HEADER_SIZE,
+ size - FW_HEADER_SIZE);
+
+ dev_dbg(gxp->dev,
+ "Requesting GSA authentication. meta = %pad payload = %pap",
+ &header_dma_addr, &buffer->paddr);
+
+ ret = gsa_authenticate_image(gxp->gsa_dev, header_dma_addr,
+ buffer->paddr);
+ if (ret) {
+ dev_err(gxp->dev, "GSA authentication failed: %d\n", ret);
+ } else {
+ dev_dbg(gxp->dev,
+ "Authentication succeeded, loading ELF segments\n");
+ ret = elf_load_segments(gxp, data + FW_HEADER_SIZE,
+ size - FW_HEADER_SIZE, buffer);
+ if (ret)
+ dev_err(gxp->dev, "ELF parsing failed (%d)\n", ret);
+ }
+
+ dma_free_coherent(gxp->gsa_dev, FW_HEADER_SIZE, header_vaddr,
+ header_dma_addr);
+
+ return ret;
+}
+
/* Forward declaration for usage inside gxp_firmware_load(..). */
static void gxp_firmware_unload(struct gxp_dev *gxp, uint core);
@@ -173,8 +258,8 @@ static int gxp_firmware_load(struct gxp_dev *gxp, uint core)
goto out_firmware_unload;
}
- /* Load firmware to System RAM */
- ret = elf_load_segments(gxp, fw[core], core);
+ /* Authenticate and load firmware to System RAM */
+ ret = gxp_firmware_load_authenticated(gxp, fw[core], &gxp->fwbufs[core]);
if (ret) {
dev_err(gxp->dev, "Unable to load elf file\n");
goto out_firmware_unload;
@@ -277,9 +362,8 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
if (readl(core_scratchpad_base + offset) != Q7_ALIVE_MAGIC) {
dev_err(gxp->dev, "Core %u did not respond!\n", core);
return -EIO;
- } else {
- dev_notice(gxp->dev, "Core %u is alive!\n", core);
}
+ dev_notice(gxp->dev, "Core %u is alive!\n", core);
#ifndef CONFIG_GXP_GEM5
/*
@@ -302,10 +386,8 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
if (readl(core_scratchpad_base + offset) != expected_top_value) {
dev_err(gxp->dev, "TOP access from core %u failed!\n", core);
return -EIO;
- } else {
- dev_notice(gxp->dev, "TOP access from core %u successful!\n",
- core);
}
+ dev_notice(gxp->dev, "TOP access from core %u successful!\n", core);
#endif // !CONFIG_GXP_GEM5
/* Stop bus performance monitors */
diff --git a/gxp-host-device-structs.h b/gxp-host-device-structs.h
index 52b5531..c8d4b44 100644
--- a/gxp-host-device-structs.h
+++ b/gxp-host-device-structs.h
@@ -86,7 +86,7 @@ struct gxp_watchdog_descriptor {
};
/*
- * A structure describing the telemetry (loggging and tracing) parameters and
+ * A structure describing the telemetry (logging and tracing) parameters and
* buffers.
*/
struct gxp_telemetry_descriptor {
diff --git a/gxp-internal.h b/gxp-internal.h
index 6ee9ee6..1fa71d1 100644
--- a/gxp-internal.h
+++ b/gxp-internal.h
@@ -105,6 +105,11 @@ struct gxp_dev {
struct gxp_tpu_dev tpu_dev;
struct gxp_telemetry_manager *telemetry_mgr;
struct gxp_wakelock_manager *wakelock_mgr;
+ /*
+ * Pointer to GSA device for firmware authentication.
+ * May be NULL if the chip does not support firmware authentication
+ */
+ struct device *gsa_dev;
};
/* GXP device IO functions */
diff --git a/gxp-mailbox.c b/gxp-mailbox.c
index ad3c688..7a42d71 100644
--- a/gxp-mailbox.c
+++ b/gxp-mailbox.c
@@ -554,8 +554,6 @@ void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
mailbox->descriptor_device_addr);
destroy_workqueue(mailbox->response_wq);
kfree(mailbox);
-
- return;
}
void gxp_mailbox_reset(struct gxp_mailbox *mailbox)
diff --git a/gxp-mapping.c b/gxp-mapping.c
index 95873c0..5db126b 100644
--- a/gxp-mapping.c
+++ b/gxp-mapping.c
@@ -324,6 +324,12 @@ struct gxp_mapping *gxp_mapping_get_host(struct gxp_dev *gxp, u64 host_address)
mutex_lock(&gxp->mappings->lock);
+ if (!host_address) {
+ dev_warn(gxp->dev,
+ "Unable to get dma-buf mapping by host address\n");
+ return NULL;
+ }
+
/* Iterate through the elements in the rbtree */
for (node = rb_first(&gxp->mappings->rb); node; node = rb_next(node)) {
this = rb_entry(node, struct gxp_mapping, node);
@@ -340,5 +346,9 @@ struct gxp_mapping *gxp_mapping_get_host(struct gxp_dev *gxp, u64 host_address)
void gxp_mapping_remove(struct gxp_dev *gxp, struct gxp_mapping *map)
{
+ mutex_lock(&gxp->mappings->lock);
+
rb_erase(&map->node, &gxp->mappings->rb);
+
+ mutex_unlock(&gxp->mappings->lock);
}
diff --git a/gxp-mapping.h b/gxp-mapping.h
index a0225ae..3fe6293 100644
--- a/gxp-mapping.h
+++ b/gxp-mapping.h
@@ -22,6 +22,11 @@ struct gxp_mapping_root {
struct gxp_mapping {
struct rb_node node;
+ /*
+ * User-space address of the mapped buffer.
+ * If this value is 0, it indicates this mapping is for a dma-buf and
+ * should not be used if a regular buffer mapping was expected.
+ */
u64 host_address;
uint core_list;
/*
diff --git a/gxp-mb-notification.c b/gxp-mb-notification.c
index 3198984..77412f8 100644
--- a/gxp-mb-notification.c
+++ b/gxp-mb-notification.c
@@ -37,7 +37,7 @@ int gxp_notification_unregister_handler(struct gxp_dev *gxp, uint core,
mailbox = gxp->mailbox_mgr->mailboxes[core];
if (!mailbox)
- return-ENODEV;
+ return -ENODEV;
return gxp_mailbox_unregister_interrupt_handler(mailbox, type);
}
diff --git a/gxp-platform.c b/gxp-platform.c
index 35634c0..3061935 100644
--- a/gxp-platform.c
+++ b/gxp-platform.c
@@ -31,6 +31,7 @@
#include "gxp-debug-dump.h"
#include "gxp-debugfs.h"
#include "gxp-dma.h"
+#include "gxp-dmabuf.h"
#include "gxp-firmware.h"
#include "gxp-firmware-data.h"
#include "gxp-internal.h"
@@ -62,6 +63,16 @@ static struct platform_device gxp_sscd_dev = {
};
#endif // CONFIG_SUBSYSTEM_COREDUMP
+/* Mapping from GXP_POWER_STATE_* to enum aur_power_state in gxp-pm.h */
+static const uint aur_state_array[GXP_POWER_STATE_NOM + 1] = { AUR_OFF, AUR_UUD,
+ AUR_SUD, AUR_UD,
+ AUR_NOM };
+/* Mapping from MEMORY_POWER_STATE_* to enum aur_memory_power_state in gxp-pm.h */
+static const uint aur_memory_state_array[MEMORY_POWER_STATE_MAX + 1] = {
+ AUR_MEM_UNDEFINED, AUR_MEM_MIN, AUR_MEM_VERY_LOW, AUR_MEM_LOW,
+ AUR_MEM_HIGH, AUR_MEM_VERY_HIGH, AUR_MEM_MAX
+};
+
static int gxp_open(struct inode *inode, struct file *file)
{
struct gxp_client *client;
@@ -229,6 +240,10 @@ static int gxp_unmap_buffer(struct gxp_client *client,
if (!map) {
ret = -EINVAL;
goto out;
+ } else if (!map->host_address) {
+ dev_err(gxp->dev, "dma-bufs must be unmapped via GXP_UNMAP_DMABUF\n");
+ ret = -EINVAL;
+ goto out;
}
WARN_ON(map->host_address != ibuf.host_address);
@@ -561,7 +576,8 @@ gxp_etm_trace_start_command(struct gxp_client *client,
if (ibuf.pc_match_mask_length > ETM_TRACE_PC_MATCH_MASK_LEN_MAX)
return -EINVAL;
- phys_core = gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
+ phys_core =
+ gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
if (phys_core < 0) {
dev_err(gxp->dev, "Trace start failed: Invalid virtual core id (%u)\n",
ibuf.virtual_core_id);
@@ -912,6 +928,25 @@ static int gxp_acquire_wake_lock(struct gxp_client *client,
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
+ if (ibuf.gxp_power_state == GXP_POWER_STATE_OFF) {
+ dev_err(gxp->dev,
+ "GXP_POWER_STATE_OFF is not a valid value when acquiring a wakelock\n");
+ return -EINVAL;
+ }
+ if (ibuf.gxp_power_state < GXP_POWER_STATE_OFF ||
+ ibuf.gxp_power_state > GXP_POWER_STATE_NOM) {
+ dev_err(gxp->dev, "Requested power state is invalid\n");
+ return -EINVAL;
+ }
+ if ((ibuf.memory_power_state < MEMORY_POWER_STATE_MIN ||
+ ibuf.memory_power_state > MEMORY_POWER_STATE_MAX) &&
+ ibuf.memory_power_state != MEMORY_POWER_STATE_UNDEFINED) {
+ dev_err(gxp->dev,
+ "Requested memory power state %d is invalid\n",
+ ibuf.memory_power_state);
+ return -EINVAL;
+ }
+
down_write(&client->semaphore);
/* Acquire a BLOCK wakelock if requested */
@@ -957,6 +992,15 @@ static int gxp_acquire_wake_lock(struct gxp_client *client,
client->has_vd_wakelock = true;
}
+ gxp_pm_update_requested_power_state(
+ gxp, client->requested_power_state,
+ aur_state_array[ibuf.gxp_power_state]);
+ client->requested_power_state = aur_state_array[ibuf.gxp_power_state];
+ gxp_pm_update_requested_memory_power_state(
+ gxp, client->requested_memory_power_state,
+ aur_memory_state_array[ibuf.memory_power_state]);
+ client->requested_memory_power_state =
+ aur_memory_state_array[ibuf.memory_power_state];
out:
up_write(&client->semaphore);
@@ -1022,6 +1066,17 @@ static int gxp_release_wake_lock(struct gxp_client *client, __u32 __user *argp)
}
gxp_wakelock_release(gxp);
+ /*
+ * Other clients may still be using the BLK_AUR, check if we need
+ * to change the power state.
+ */
+ gxp_pm_update_requested_power_state(
+ gxp, client->requested_power_state, AUR_OFF);
+ client->requested_power_state = AUR_OFF;
+ gxp_pm_update_requested_memory_power_state(
+ gxp, client->requested_memory_power_state,
+ AUR_MEM_UNDEFINED);
+ client->requested_memory_power_state = AUR_MEM_UNDEFINED;
client->has_block_wakelock = false;
}
@@ -1032,6 +1087,86 @@ out:
return ret;
}
+static int gxp_map_dmabuf(struct gxp_client *client,
+ struct gxp_map_dmabuf_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_map_dmabuf_ioctl ibuf;
+ struct gxp_mapping *mapping;
+ int ret = 0;
+ uint phys_core_list;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_MAP_DMABUF requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ phys_core_list = gxp_vd_virt_core_list_to_phys_core_list(
+ client->vd, ibuf.virtual_core_list);
+ if (phys_core_list == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mapping = gxp_dmabuf_map(gxp, phys_core_list, ibuf.dmabuf_fd,
+ /*gxp_dma_flags=*/0,
+ mapping_flags_to_dma_dir(ibuf.flags));
+ if (IS_ERR(mapping)) {
+ ret = PTR_ERR(mapping);
+ dev_err(gxp->dev, "Failed to map dma-buf (ret=%d)\n", ret);
+ goto out;
+ }
+
+ ibuf.device_address = mapping->device_address;
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
+ /* If the IOCTL fails, the dma-buf must be unmapped */
+ gxp_dmabuf_unmap(gxp, ibuf.device_address);
+ ret = -EFAULT;
+ }
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_unmap_dmabuf(struct gxp_client *client,
+ struct gxp_map_dmabuf_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_map_dmabuf_ioctl ibuf;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_UNMAP_DMABUF requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ gxp_dmabuf_unmap(gxp, ibuf.device_address);
+
+out:
+ up_read(&client->semaphore);
+
+ return ret;
+}
+
static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
{
struct gxp_client *client = file->private_data;
@@ -1099,6 +1234,12 @@ static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
case GXP_RELEASE_WAKE_LOCK:
ret = gxp_release_wake_lock(client, argp);
break;
+ case GXP_MAP_DMABUF:
+ ret = gxp_map_dmabuf(client, argp);
+ break;
+ case GXP_UNMAP_DMABUF:
+ ret = gxp_unmap_dmabuf(client, argp);
+ break;
default:
ret = -ENOTTY; /* unknown command */
}
@@ -1144,9 +1285,10 @@ static int gxp_platform_probe(struct platform_device *pdev)
phys_addr_t offset, base_addr;
struct device_node *np;
struct platform_device *tpu_pdev;
+ struct platform_device *gsa_pdev;
int ret;
- int i __maybe_unused;
- bool tpu_found __maybe_unused;
+ int __maybe_unused i;
+ bool __maybe_unused tpu_found;
dev_notice(dev, "Probing gxp driver with commit %s\n", GIT_REPO_TAG);
@@ -1293,10 +1435,30 @@ static int gxp_platform_probe(struct platform_device *pdev)
goto err_vd_destroy;
}
+ /* Get GSA device from device tree */
+ np = of_parse_phandle(dev->of_node, "gsa-device", 0);
+ if (!np) {
+ dev_warn(
+ dev,
+ "No gsa-device in device tree. Firmware authentication not available\n");
+ } else {
+ gsa_pdev = of_find_device_by_node(np);
+ if (!gsa_pdev) {
+ dev_err(dev, "GSA device not found\n");
+ of_node_put(np);
+ ret = -ENODEV;
+ goto err_vd_destroy;
+ }
+ gxp->gsa_dev = get_device(&gsa_pdev->dev);
+ of_node_put(np);
+ dev_info(
+ dev,
+ "GSA device found, Firmware authentication available\n");
+ }
+
gxp_fw_data_init(gxp);
gxp_telemetry_init(gxp);
gxp_create_debugfs(gxp);
- gxp_pm_init(gxp);
gxp->thermal_mgr = gxp_thermal_init(gxp);
if (!gxp->thermal_mgr)
dev_err(dev, "Failed to init thermal driver\n");
@@ -1334,6 +1496,8 @@ static int gxp_platform_remove(struct platform_device *pdev)
#ifndef CONFIG_GXP_USE_SW_MAILBOX
put_device(gxp->tpu_dev.dev);
#endif
+ if (gxp->gsa_dev)
+ put_device(gxp->gsa_dev);
misc_deregister(&gxp->misc_dev);
gxp_pm_destroy(gxp);
diff --git a/gxp-pm.c b/gxp-pm.c
index 697af81..87da768 100644
--- a/gxp-pm.c
+++ b/gxp-pm.c
@@ -9,10 +9,12 @@
#include <linux/pm_runtime.h>
#include <linux/refcount.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#ifdef CONFIG_GXP_CLOUDRIPPER
#include <linux/acpm_dvfs.h>
#endif
+#include <soc/google/exynos_pm_qos.h>
#include "gxp-bpm.h"
#include "gxp-doorbell.h"
@@ -20,6 +22,24 @@
#include "gxp-lpm.h"
#include "gxp-pm.h"
+static const enum aur_power_state aur_state_array[] = { AUR_OFF, AUR_UUD,
+ AUR_SUD, AUR_UD,
+ AUR_NOM };
+static const uint aur_memory_state_array[] = {
+ AUR_MEM_UNDEFINED, AUR_MEM_MIN, AUR_MEM_VERY_LOW, AUR_MEM_LOW,
+ AUR_MEM_HIGH, AUR_MEM_VERY_HIGH, AUR_MEM_MAX
+};
+
+/*
+ * TODO(b/177692488): move frequency values into chip-specific config.
+ * TODO(b/221168126): survey how these values are derived. Below
+ * values are copied from the implementation in TPU firmware for PRO,
+ * i.e. google3/third_party/darwinn/firmware/janeiro/power_manager.cc.
+ */
+static const s32 aur_memory_state2int_table[] = { 0, 0, 0, 200, 332, 465, 533 };
+static const s32 aur_memory_state2mif_table[] = { 0, 0, 0, 1014,
+ 1352, 2028, 3172 };
+
static struct gxp_pm_device_ops gxp_aur_ops = {
.pre_blk_powerup = NULL,
.post_blk_powerup = NULL,
@@ -67,6 +87,9 @@ static int gxp_pm_blkpwr_down(struct gxp_dev *gxp)
dev_err(gxp->dev, "%s: pm_runtime_put_sync returned %d\n",
__func__, ret);
#endif
+ /* Remove our vote for INT/MIF state (if any) */
+ exynos_pm_qos_update_request(&gxp->power_mgr->int_min, 0);
+ exynos_pm_qos_update_request(&gxp->power_mgr->mif_min, 0);
return ret;
}
@@ -81,6 +104,16 @@ int gxp_pm_blk_set_state_acpm(struct gxp_dev *gxp, unsigned long state)
return ret;
}
+static void gxp_pm_blk_set_state_acpm_async(struct work_struct *work)
+{
+ struct gxp_set_acpm_state_work *set_acpm_state_work =
+ container_of(work, struct gxp_set_acpm_state_work, work);
+
+ mutex_lock(&set_acpm_state_work->gxp->power_mgr->pm_lock);
+ gxp_pm_blk_set_state_acpm(set_acpm_state_work->gxp, set_acpm_state_work->state);
+ mutex_unlock(&set_acpm_state_work->gxp->power_mgr->pm_lock);
+}
+
int gxp_pm_blk_get_state_acpm(struct gxp_dev *gxp)
{
int ret = 0;
@@ -130,6 +163,10 @@ int gxp_pm_blk_off(struct gxp_dev *gxp)
mutex_unlock(&gxp->power_mgr->pm_lock);
return -EBUSY;
}
+ if (gxp->power_mgr->curr_state == AUR_OFF) {
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+ return ret;
+ }
/* Shutdown TOP's PSM */
gxp_lpm_destroy(gxp);
@@ -173,7 +210,6 @@ int gxp_pm_core_on(struct gxp_dev *gxp, uint core)
return ret;
}
- gxp->power_mgr->pwr_state_req[core] = gxp->power_mgr->curr_state;
mutex_unlock(&gxp->power_mgr->pm_lock);
dev_notice(gxp->dev, "%s: Core %d up\n", __func__, core);
@@ -189,7 +225,6 @@ int gxp_pm_core_off(struct gxp_dev *gxp, uint core)
mutex_lock(&gxp->power_mgr->pm_lock);
gxp_lpm_down(gxp, core);
- gxp->power_mgr->pwr_state_req[core] = AUR_OFF;
mutex_unlock(&gxp->power_mgr->pm_lock);
/*
* TODO: b/199467568 If all cores are off shutdown blk
@@ -198,60 +233,209 @@ int gxp_pm_core_off(struct gxp_dev *gxp, uint core)
return 0;
}
-int gxp_pm_get_core_state(struct gxp_dev *gxp, uint core)
+static int gxp_pm_req_state_locked(struct gxp_dev *gxp, enum aur_power_state state)
{
- int ret;
+ if (state > AUR_MAX_ALLOW_STATE) {
+ dev_err(gxp->dev, "Invalid state %d\n", state);
+ return -EINVAL;
+ }
+ if (state != gxp->power_mgr->curr_state) {
+ gxp->power_mgr->curr_state = state;
+ if (state == AUR_OFF) {
+ gxp_pm_blk_off(gxp);
+ } else {
+ gxp->power_mgr->set_acpm_rate_work.gxp = gxp;
+ gxp->power_mgr->set_acpm_rate_work.state = state;
+ queue_work(gxp->power_mgr->wq,
+ &gxp->power_mgr->set_acpm_rate_work.work);
+ }
+ }
+
+ return 0;
+}
+
+int gxp_pm_req_state(struct gxp_dev *gxp, enum aur_power_state state)
+{
+ int ret = 0;
mutex_lock(&gxp->power_mgr->pm_lock);
- ret = gxp->power_mgr->pwr_state_req[core];
+ ret = gxp_pm_req_state_locked(gxp, state);
mutex_unlock(&gxp->power_mgr->pm_lock);
-
return ret;
}
-int gxp_pm_req_state(struct gxp_dev *gxp, uint core, enum aur_power_state state)
+/* Caller must hold pm_lock */
+static int gxp_pm_revoke_power_state_vote(struct gxp_dev *gxp,
+ enum aur_power_state revoked_state)
+{
+ unsigned int i;
+
+ if (revoked_state == AUR_OFF)
+ return 0;
+ for (i = 0; i < AUR_NUM_POWER_STATE; i++) {
+ if (aur_state_array[i] == revoked_state) {
+ if (gxp->power_mgr->pwr_state_req_count[i] <= 0) {
+ dev_err(gxp->dev, "Invalid state %d\n",
+ revoked_state);
+ return -EINVAL;
+ }
+ gxp->power_mgr->pwr_state_req_count[i]--;
+ }
+ }
+ return 0;
+}
+
+/* Caller must hold pm_lock */
+static void gxp_pm_vote_power_state(struct gxp_dev *gxp,
+ enum aur_power_state state)
+{
+ unsigned int i;
+
+ if (state == AUR_OFF)
+ return;
+ for (i = 0; i < AUR_NUM_POWER_STATE; i++)
+ if (aur_state_array[i] == state)
+ gxp->power_mgr->pwr_state_req_count[i]++;
+}
+
+/* Caller must hold pm_lock */
+static unsigned long gxp_pm_get_max_voted_power_state(struct gxp_dev *gxp)
{
int i;
- unsigned long curr_max_state = AUR_OFF;
+ unsigned long state = AUR_OFF;
- if (core >= GXP_NUM_CORES) {
- dev_err(gxp->dev, "Invalid core num %d\n", core);
- return -EINVAL;
+ for (i = AUR_NUM_POWER_STATE - 1; i >= 0; i--) {
+ if (gxp->power_mgr->pwr_state_req_count[i] > 0) {
+ state = aur_state_array[i];
+ break;
+ }
}
+ return state;
+}
- if (state > AUR_MAX_ALLOW_STATE) {
- dev_err(gxp->dev, "Invalid state %d\n", state);
+int gxp_pm_update_requested_power_state(struct gxp_dev *gxp,
+ enum aur_power_state origin_state,
+ enum aur_power_state requested_state)
+{
+ int ret;
+ unsigned long max_state;
+
+ mutex_lock(&gxp->power_mgr->pm_lock);
+ ret = gxp_pm_revoke_power_state_vote(gxp, origin_state);
+ if (ret < 0)
+ goto out;
+ gxp_pm_vote_power_state(gxp, requested_state);
+ max_state = gxp_pm_get_max_voted_power_state(gxp);
+ ret = gxp_pm_req_state_locked(gxp, max_state);
+out:
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+ return ret;
+}
+
+static int gxp_pm_req_pm_qos(struct gxp_dev *gxp, s32 int_val, s32 mif_val)
+{
+ exynos_pm_qos_update_request(&gxp->power_mgr->int_min, int_val);
+ exynos_pm_qos_update_request(&gxp->power_mgr->mif_min, mif_val);
+ return 0;
+}
+
+static int gxp_pm_req_memory_state_locked(struct gxp_dev *gxp, enum aur_memory_power_state state)
+{
+ s32 int_val = 0, mif_val = 0;
+
+ if (state > AUR_MAX_ALLOW_MEMORY_STATE) {
+ dev_err(gxp->dev, "Invalid memory state %d\n", state);
return -EINVAL;
}
- mutex_lock(&gxp->power_mgr->pm_lock);
- gxp->power_mgr->pwr_state_req[core] = state;
- for (i = 0; i < GXP_NUM_CORES; i++) {
- if (gxp->power_mgr->pwr_state_req[i] >= curr_max_state)
- curr_max_state = gxp->power_mgr->pwr_state_req[i];
+ if (state != gxp->power_mgr->curr_memory_state) {
+ gxp->power_mgr->curr_memory_state = state;
+ int_val = aur_memory_state2int_table[state];
+ mif_val = aur_memory_state2mif_table[state];
+ gxp_pm_req_pm_qos(gxp, int_val, mif_val);
}
- if (state == AUR_OFF)
- gxp_pm_core_off(gxp, core);
- if (curr_max_state != gxp->power_mgr->curr_state &&
- curr_max_state > AUR_OFF) {
- gxp_pm_blk_set_state_acpm(gxp, curr_max_state);
- gxp->power_mgr->curr_state = curr_max_state;
- } else {
- /*
- * TODO: b/199467568 If all cores are off shutdown blk
- */
- }
- mutex_unlock(&gxp->power_mgr->pm_lock);
+ return 0;
+}
+/* Caller must hold pm_lock */
+static int gxp_pm_revoke_memory_power_state_vote(struct gxp_dev *gxp,
+ enum aur_memory_power_state revoked_state)
+{
+ unsigned int i;
+
+ if (revoked_state == AUR_MEM_UNDEFINED)
+ return 0;
+ for (i = 0; i < AUR_NUM_MEMORY_POWER_STATE; i++) {
+ if (aur_memory_state_array[i] == revoked_state) {
+ if (gxp->power_mgr->mem_pwr_state_req_count[i] == 0)
+ dev_err_ratelimited(
+ gxp->dev,
+ "Invalid memory state %d with zero count\n",
+ revoked_state);
+ else
+ gxp->power_mgr->mem_pwr_state_req_count[i]--;
+ return 0;
+ }
+ }
return 0;
}
+/* Caller must hold pm_lock */
+static void gxp_pm_vote_memory_power_state(struct gxp_dev *gxp,
+ enum aur_memory_power_state state)
+{
+ unsigned int i;
+
+ if (state == AUR_MEM_UNDEFINED)
+ return;
+ for (i = 0; i < AUR_NUM_MEMORY_POWER_STATE; i++) {
+ if (aur_memory_state_array[i] == state) {
+ gxp->power_mgr->mem_pwr_state_req_count[i]++;
+ return;
+ }
+ }
+}
+
+/* Caller must hold pm_lock */
+static unsigned long gxp_pm_get_max_voted_memory_power_state(struct gxp_dev *gxp)
+{
+ int i;
+ unsigned long state = AUR_MEM_UNDEFINED;
+
+ for (i = AUR_NUM_MEMORY_POWER_STATE - 1; i >= 0; i--) {
+ if (gxp->power_mgr->mem_pwr_state_req_count[i] > 0) {
+ state = aur_memory_state_array[i];
+ break;
+ }
+ }
+ return state;
+}
+
+int gxp_pm_update_requested_memory_power_state(
+ struct gxp_dev *gxp, enum aur_memory_power_state origin_state,
+ enum aur_memory_power_state requested_state)
+{
+ int ret;
+ unsigned long max_state;
+
+ mutex_lock(&gxp->power_mgr->pm_lock);
+ ret = gxp_pm_revoke_memory_power_state_vote(gxp, origin_state);
+ if (ret < 0)
+ goto out;
+ gxp_pm_vote_memory_power_state(gxp, requested_state);
+ max_state = gxp_pm_get_max_voted_memory_power_state(gxp);
+ ret = gxp_pm_req_memory_state_locked(gxp, max_state);
+out:
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+ return ret;
+}
+
int gxp_pm_acquire_blk_wakelock(struct gxp_dev *gxp)
{
mutex_lock(&gxp->power_mgr->pm_lock);
refcount_inc(&(gxp->power_mgr->blk_wake_ref));
dev_dbg(gxp->dev, "Blk wakelock ref count: %d\n",
- refcount_read(&(gxp->power_mgr->blk_wake_ref)));
+ refcount_read(&(gxp->power_mgr->blk_wake_ref)));
mutex_unlock(&gxp->power_mgr->pm_lock);
return 0;
}
@@ -275,7 +459,6 @@ int gxp_pm_release_blk_wakelock(struct gxp_dev *gxp)
int gxp_pm_init(struct gxp_dev *gxp)
{
struct gxp_power_manager *mgr;
- int i;
mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
if (!mgr)
@@ -283,15 +466,19 @@ int gxp_pm_init(struct gxp_dev *gxp)
mgr->gxp = gxp;
mutex_init(&mgr->pm_lock);
mgr->curr_state = AUR_OFF;
+ mgr->curr_memory_state = AUR_MEM_UNDEFINED;
refcount_set(&(mgr->blk_wake_ref), 0);
- for (i = 0; i < GXP_NUM_CORES; i++)
- mgr->pwr_state_req[i] = AUR_OFF;
mgr->ops = &gxp_aur_ops;
gxp->power_mgr = mgr;
+ INIT_WORK(&mgr->set_acpm_rate_work.work, gxp_pm_blk_set_state_acpm_async);
+ gxp->power_mgr->wq =
+ create_singlethread_workqueue("gxp_power_work_queue");
#if defined(CONFIG_GXP_CLOUDRIPPER) && !defined(CONFIG_GXP_TEST)
pm_runtime_enable(gxp->dev);
#endif
+ exynos_pm_qos_add_request(&mgr->int_min, PM_QOS_DEVICE_THROUGHPUT, 0);
+ exynos_pm_qos_add_request(&mgr->mif_min, PM_QOS_BUS_THROUGHPUT, 0);
return 0;
}
@@ -300,11 +487,13 @@ int gxp_pm_destroy(struct gxp_dev *gxp)
{
struct gxp_power_manager *mgr;
+ mgr = gxp->power_mgr;
+ exynos_pm_qos_remove_request(&mgr->int_min);
+ exynos_pm_qos_remove_request(&mgr->mif_min);
#if defined(CONFIG_GXP_CLOUDRIPPER) && !defined(CONFIG_GXP_TEST)
pm_runtime_disable(gxp->dev);
#endif
-
- mgr = gxp->power_mgr;
+ destroy_workqueue(mgr->wq);
mutex_destroy(&mgr->pm_lock);
return 0;
}
diff --git a/gxp-pm.h b/gxp-pm.h
index fdee637..73273f7 100644
--- a/gxp-pm.h
+++ b/gxp-pm.h
@@ -9,6 +9,7 @@
#include "gxp-internal.h"
#include <linux/refcount.h>
+#include <soc/google/exynos_pm_qos.h>
#define AUR_DVFS_MIN_STATE 178000
@@ -20,9 +21,24 @@ enum aur_power_state {
AUR_NOM = 1160000,
};
-#define AUR_INIT_DVFS_STATE AUR_NOM
+enum aur_memory_power_state {
+ AUR_MEM_UNDEFINED = 0,
+ AUR_MEM_MIN = 1,
+ AUR_MEM_VERY_LOW = 2,
+ AUR_MEM_LOW = 3,
+ AUR_MEM_HIGH = 4,
+ AUR_MEM_VERY_HIGH = 5,
+ AUR_MEM_MAX = 6,
+};
+
+#define AUR_NUM_POWER_STATE 5
+#define AUR_NUM_MEMORY_POWER_STATE (AUR_MAX_ALLOW_MEMORY_STATE + 1)
+
+#define AUR_INIT_DVFS_STATE AUR_UUD
#define AUR_MAX_ALLOW_STATE AUR_NOM
+#define AUR_MAX_ALLOW_MEMORY_STATE AUR_MEM_MAX
+
struct gxp_pm_device_ops {
int (*pre_blk_powerup)(struct gxp_dev *gxp);
int (*post_blk_powerup)(struct gxp_dev *gxp);
@@ -30,13 +46,26 @@ struct gxp_pm_device_ops {
int (*post_blk_poweroff)(struct gxp_dev *gxp);
};
+struct gxp_set_acpm_state_work {
+ struct work_struct work;
+ struct gxp_dev *gxp;
+ unsigned long state;
+};
+
struct gxp_power_manager {
struct gxp_dev *gxp;
struct mutex pm_lock;
- int pwr_state_req[GXP_NUM_CORES];
+ int pwr_state_req_count[AUR_NUM_POWER_STATE];
+ uint mem_pwr_state_req_count[AUR_NUM_MEMORY_POWER_STATE];
int curr_state;
+ int curr_memory_state;
refcount_t blk_wake_ref;
struct gxp_pm_device_ops *ops;
+ struct gxp_set_acpm_state_work set_acpm_rate_work;
+ struct workqueue_struct *wq;
+ /* INT/MIF requests for memory bandwidth */
+ struct exynos_pm_qos_request int_min;
+ struct exynos_pm_qos_request mif_min;
};
/**
@@ -91,16 +120,6 @@ int gxp_pm_core_on(struct gxp_dev *gxp, uint core);
int gxp_pm_core_off(struct gxp_dev *gxp, uint core);
/**
- * gxp_pm_get_core_state() - Get the core power state
- * @gxp: The GXP device to operate
- * @core: The core ID to get the state of
- *
- * Return:
- * * state - Frequency number in kHz the core requested
- */
-int gxp_pm_get_core_state(struct gxp_dev *gxp, uint core);
-
-/**
* gxp_pm_acquire_blk_wakelock() - Acquire blk wakelock
* to make sure block won't shutdown.
*
@@ -129,17 +148,15 @@ int gxp_pm_acquire_blk_wakelock(struct gxp_dev *gxp);
int gxp_pm_release_blk_wakelock(struct gxp_dev *gxp);
/**
- * gxp_pm_req_state() - API for a GXP core to vote for a
- * desired power state.
+ * gxp_pm_req_state() - API to request a desired power state.
* @gxp: The GXP device to operate
- * @core: Voting core ID
- * @state: State the core voting for
+ * @state: The requested state
*
* Return:
* * 0 - Voting registered
- * * -EINVAL - Invalid core num
+ * * -EINVAL - Invalid requested state
*/
-int gxp_pm_req_state(struct gxp_dev *gxp, uint core, enum aur_power_state state);
+int gxp_pm_req_state(struct gxp_dev *gxp, enum aur_power_state state);
/**
* gxp_pm_init() - API for initialize PM
@@ -187,4 +204,37 @@ int gxp_pm_blk_set_state_acpm(struct gxp_dev *gxp, unsigned long state);
* * State - State number in Khz from ACPM
*/
int gxp_pm_blk_get_state_acpm(struct gxp_dev *gxp);
+
+/**
+ * gxp_pm_update_requested_power_state() - API for a GXP client to vote for a
+ * requested state.
+ * @gxp: The GXP device to operate.
+ * @origin_state: An existing old requested state, will be cleared. If this is
+ * the first vote, pass AUR_OFF.
+ * @requested_state: The new requested state.
+ *
+ * Return:
+ * * 0 - Voting registered
+ * * -EINVAL - Invalid original state or requested state
+ */
+int gxp_pm_update_requested_power_state(struct gxp_dev *gxp,
+ enum aur_power_state origin_state,
+ enum aur_power_state requested_state);
+
+/**
+ * gxp_pm_update_requested_memory_power_state() - API for a GXP client to vote for a
+ * requested memory power state.
+ * @gxp: The GXP device to operate.
+ * @origin_state: An existing old requested state, will be cleared. If this is
+ * the first vote, pass AUR_MEM_UNDEFINED.
+ * @requested_state: The new requested state.
+ *
+ * Return:
+ * * 0 - Voting registered
+ * * -EINVAL - Invalid original state or requested state
+ */
+int gxp_pm_update_requested_memory_power_state(
+ struct gxp_dev *gxp, enum aur_memory_power_state origin_state,
+ enum aur_memory_power_state requested_state);
+
#endif /* __GXP_PM_H__ */
diff --git a/gxp-tmp.h b/gxp-tmp.h
index 088dcdf..b813867 100644
--- a/gxp-tmp.h
+++ b/gxp-tmp.h
@@ -15,7 +15,7 @@
#define AURORA_SCRATCHPAD_LEN 0x00100000 /* 1M */
#else /* CONFIG_GXP_TEST */
-/* Firmware memory is shrinked in unit tests. */
+/* Firmware memory is shrunk in unit tests. */
#define AURORA_SCRATCHPAD_OFF 0x000F0000
#define AURORA_SCRATCHPAD_LEN 0x00010000
diff --git a/gxp-vd.c b/gxp-vd.c
index 7108797..a94461b 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -151,7 +151,7 @@ void gxp_vd_stop(struct gxp_virtual_device *vd)
gxp, core, GXP_REG_ETM_PWRCTL,
1 << GXP_REG_ETM_PWRCTL_CORE_RESET_SHIFT);
}
- }
+ }
for (core = 0; core < GXP_NUM_CORES; core++) {
if (gxp->core_to_vd[core] == vd) {
diff --git a/gxp.h b/gxp.h
index 868e88e..ccfb614 100644
--- a/gxp.h
+++ b/gxp.h
@@ -37,6 +37,8 @@
* - GXP_MAP_BUFFER
* - GXP_UNMAP_BUFFER
* - GXP_SYNC_BUFFER
+ * - GXP_MAP_DMABUF
+ * - GXP_UNMAP_DMABUF
*/
struct gxp_map_ioctl {
@@ -337,12 +339,12 @@ struct gxp_etm_get_trace_info_ioctl {
* that is used for decoding the trace.
*/
__u64 trace_header_addr;
- /*
- * Input:
+ /*
+ * Input:
* Trace data user space address to contain Trace RAM data.
* Note: trace_data field will be empty if type == 0
*/
- __u64 trace_data_addr;
+ __u64 trace_data_addr;
};
/* Retrieves trace header and/or trace data for decoding purposes. */
@@ -489,8 +491,9 @@ struct gxp_acquire_wakelock_ioctl {
__u32 components_to_wake;
/*
* Minimum power state to operate the entire DSP subsystem at until
- * the wakelock is released. One of the GXP_POWER_STATE_* defines
- * from above.
+ * the BLOCK wakelock is released. One of the GXP_POWER_STATE_* defines
+ * from above. Note that the requested power state will not be cleared
+ * if only the VIRTUAL_DEVICE wakelock is released.
*
* `GXP_POWER_STATE_OFF` is not a valid value when acquiring a
* wakelock.
@@ -498,8 +501,9 @@ struct gxp_acquire_wakelock_ioctl {
__u32 gxp_power_state;
/*
* Memory interface power state to request from the system so long as
- * the wakelock is held. One of the MEMORY_POWER_STATE* defines from
- * above.
+ * the BLOCK wakelock is held. One of the MEMORY_POWER_STATE* defines
+ * from above. The requested memory power state will not be cleared if
+ * only the VIRTUAL_DEVICE wakelock is released.
*
* If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
* the memory interface power state will be made.
@@ -556,4 +560,56 @@ struct gxp_acquire_wakelock_ioctl {
*/
#define GXP_RELEASE_WAKE_LOCK _IOW(GXP_IOCTL_BASE, 19, __u32)
+struct gxp_map_dmabuf_ioctl {
+ /*
+ * Bitfield indicating which virtual cores to map the dma-buf for.
+ * To map for virtual core X, set bit X in this field, i.e. `1 << X`.
+ *
+ * This field is not used by the unmap dma-buf IOCTL, which always
+ * unmaps a dma-buf for all cores it had been mapped for.
+ */
+ __u16 virtual_core_list;
+ __s32 dmabuf_fd; /* File descriptor of the dma-buf to map. */
+ /*
+ * Flags indicating mapping attribute requests from the runtime.
+ * Set RESERVED bits to 0 to ensure backwards compatibility.
+ *
+ * Bitfields:
+ * [1:0] - DMA_DIRECTION:
+ * 00 = DMA_BIDIRECTIONAL (host/device can write buffer)
+ * 01 = DMA_TO_DEVICE (host can write buffer)
+ * 10 = DMA_FROM_DEVICE (device can write buffer)
+ * Note: DMA_DIRECTION is the direction in which data moves
+ * from the host's perspective.
+ * [31:2] - RESERVED
+ */
+ __u32 flags;
+ /*
+ * Device address the dmabuf is mapped to.
+ * - GXP_MAP_DMABUF uses this field to return the address the dma-buf
+ * can be accessed from by the device.
+ * - GXP_UNMAP_DMABUF expects this field to contain the value from the
+ * mapping call, and uses it to determine which dma-buf to unmap.
+ */
+ __u64 device_address;
+};
+
+/*
+ * Map host buffer via its dma-buf FD.
+ *
+ * The client must hold a VIRTUAL_DEVICE wakelock.
+ */
+#define GXP_MAP_DMABUF _IOWR(GXP_IOCTL_BASE, 20, struct gxp_map_dmabuf_ioctl)
+
+/*
+ * Un-map host buffer previously mapped by GXP_MAP_DMABUF.
+ *
+ * Only the @device_address field is used. Other fields are fetched from the
+ * kernel's internal records. It is recommended to use the argument that was
+ * passed in GXP_MAP_DMABUF to un-map the dma-buf.
+ *
+ * The client must hold a VIRTUAL_DEVICE wakelock.
+ */
+#define GXP_UNMAP_DMABUF _IOW(GXP_IOCTL_BASE, 21, struct gxp_map_dmabuf_ioctl)
+
#endif /* __GXP_H__ */
diff --git a/include/soc/google/exynos_pm_qos.h b/include/soc/google/exynos_pm_qos.h
new file mode 100644
index 0000000..441b06d
--- /dev/null
+++ b/include/soc/google/exynos_pm_qos.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_EXYNOS_PM_QOS_H
+#define _LINUX_EXYNOS_PM_QOS_H
+/* interface for the exynos_pm_qos_power infrastructure of the linux kernel.
+ *
+ * Mark Gross <mgross@linux.intel.com>
+ */
+
+#include <linux/plist.h>
+#include <linux/workqueue.h>
+
+enum {
+ EXYNOS_PM_QOS_RESERVED = 0,
+ PM_QOS_CLUSTER0_FREQ_MIN,
+ PM_QOS_CLUSTER0_FREQ_MAX,
+ PM_QOS_CLUSTER1_FREQ_MIN,
+ PM_QOS_CLUSTER1_FREQ_MAX,
+ PM_QOS_CLUSTER2_FREQ_MIN,
+ PM_QOS_CLUSTER2_FREQ_MAX,
+ PM_QOS_DEVICE_THROUGHPUT,
+ PM_QOS_INTCAM_THROUGHPUT,
+ PM_QOS_DEVICE_THROUGHPUT_MAX,
+ PM_QOS_INTCAM_THROUGHPUT_MAX,
+ PM_QOS_BUS_THROUGHPUT,
+ PM_QOS_BUS_THROUGHPUT_MAX,
+ PM_QOS_DISPLAY_THROUGHPUT,
+ PM_QOS_DISPLAY_THROUGHPUT_MAX,
+ PM_QOS_CAM_THROUGHPUT,
+ PM_QOS_CAM_THROUGHPUT_MAX,
+ PM_QOS_MFC_THROUGHPUT,
+ PM_QOS_MFC_THROUGHPUT_MAX,
+ PM_QOS_TNR_THROUGHPUT,
+ PM_QOS_TNR_THROUGHPUT_MAX,
+ PM_QOS_BO_THROUGHPUT,
+ PM_QOS_BO_THROUGHPUT_MAX,
+ PM_QOS_GPU_THROUGHPUT_MIN,
+ PM_QOS_GPU_THROUGHPUT_MAX,
+ EXYNOS_PM_QOS_NUM_CLASSES,
+};
+
+struct exynos_pm_qos_request {
+ struct plist_node node;
+ int exynos_pm_qos_class;
+ struct delayed_work work; /* for exynos_pm_qos_update_request_timeout */
+ const char *func;
+ unsigned int line;
+};
+
+static inline void exynos_pm_qos_add_request(struct exynos_pm_qos_request *req,
+ int exynos_pm_qos_class, s32 value)
+{
+}
+
+static inline void
+exynos_pm_qos_update_request(struct exynos_pm_qos_request *req, s32 new_value)
+{
+}
+
+static inline void
+exynos_pm_qos_remove_request(struct exynos_pm_qos_request *req)
+{
+}
+
+#endif /* _LINUX_EXYNOS_PM_QOS_H */