author      Aurora pro automerger <aurora-pro-automerger@google.com>  2022-04-28 17:59:17 +0800
committer   John Scheible <johnscheible@google.com>                   2022-05-02 22:33:52 +0000
commit      27bed782f3a828674c0f1584cf355bf592c382be (patch)
tree        79f6f3410eece25889b3cfc80dcd156ec5b543c6
parent      a96a198c9328b06866df39ebd420b0f3fd58ce51 (diff)
download    gs201-27bed782f3a828674c0f1584cf355bf592c382be.tar.gz
[Copybara Auto Merge] Merge branch 'gs201-release' into 'android13-gs-pixel-5.10'
gxp: check BLK is on during power state transition
gxp: prepare more worker structures for async jobs
gxp: Cleanup virt<->phys core translation APIs
gxp: switch mux to make sure LPM works
gxp: init has_vd_lock field of gxp_client
gxp: Clean up variable names and update variable type
gxp: remove gxp-tmp.h
gxp: move scratchpad macros from tmp to firmware.h
gxp: remove no-iommu support
gxp: remove SYNC_ macros from tmp.h
gxp: remove DOORBELL macros
gxp: move PSM macros to lpm.h
gxp: Check for valid VD in mb_eventfd IOCTLs
gxp: Firmware startup and Core-On optimizations
gxp: Move ownership of user response queues
gxp: move macros from tmp.h to bpm.c
gxp: remove legacy software mailbox support
gxp: Add gxp-eventfd interface
gxp: remove unused macros from gxp-tmp.h
gxp: bind page tables per virtual device

Bug: 176979630
Bug: 207037425
Bug: 207038856
Bug: 209083969
Bug: 225059930
Bug: 226211187
Bug: 227145352
Bug: 227693917
Bug: 227694164
Bug: 228233514
Bug: 228921329
Bug: 229095276
Bug: 229584236
GitOrigin-RevId: d2c00e3ee2d71e551d41adfa5bcc6bec79379db3
Signed-off-by: Todd Poynor <toddpoynor@google.com>
Change-Id: Ia92e12a2ab46eadc2876bcdb7ed3c04e223b3901
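
A minimal, illustrative sketch (not part of this commit) of how a caller in the driver, such as the virtual-device setup path, might use the per-virtual-device DMA entry points this merge introduces. The function signatures are taken from the diff below; the helper itself, its name, and its error handling are assumptions for illustration only.

    /* Hypothetical helper; only the gxp_dma_* calls below come from this commit. */
    #include "gxp-dma.h"
    #include "gxp-internal.h"
    #include "gxp-vd.h"

    static int example_bring_up_core(struct gxp_dev *gxp,
                                     struct gxp_virtual_device *vd,
                                     uint virt_core, uint core)
    {
            int ret;

            /* Attach the virtual core's aux IOMMU domain (and program the SSMT). */
            ret = gxp_dma_domain_attach_device(gxp, vd, virt_core, core);
            if (ret)
                    return ret;

            /* Map registers, mailbox, firmware, and core-dump regions into it. */
            ret = gxp_dma_map_core_resources(gxp, vd, virt_core, core);
            if (ret)
                    gxp_dma_domain_detach_device(gxp, vd, virt_core);

            return ret;
    }
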
-rw-r--r--  Makefile                             |   27
-rw-r--r--  gxp-bpm.c                            |   15
-rw-r--r--  gxp-client.c                         |   32
-rw-r--r--  gxp-client.h                         |    7
-rw-r--r--  gxp-config.h                         |    4
-rw-r--r--  gxp-csrs.h                           |    3
-rw-r--r--  gxp-debug-dump.c                     |   20
-rw-r--r--  gxp-dma-iommu-gem5.c                 |  196
-rw-r--r--  gxp-dma-iommu.c                      |  637
-rw-r--r--  gxp-dma-iommu.h                      |   20
-rw-r--r--  gxp-dma-rmem.c                       |  615
-rw-r--r--  gxp-dma.h                            |  300
-rw-r--r--  gxp-dmabuf.c                         |   17
-rw-r--r--  gxp-dmabuf.h                         |   13
-rw-r--r--  gxp-eventfd.c                        |   72
-rw-r--r--  gxp-eventfd.h                        |   53
-rw-r--r--  gxp-firmware-data.c                  |   29
-rw-r--r--  gxp-firmware.c                       |   93
-rw-r--r--  gxp-firmware.h                       |   29
-rw-r--r--  gxp-hw-mailbox-driver.c              |    3
-rw-r--r--  gxp-internal.h                       |   69
-rw-r--r--  gxp-lpm.c                            |   26
-rw-r--r--  gxp-lpm.h                            |   12
-rw-r--r--  gxp-mailbox.c                        |   66
-rw-r--r--  gxp-mailbox.h                        |   10
-rw-r--r--  gxp-mapping.c                        |   54
-rw-r--r--  gxp-mapping.h                        |   12
-rw-r--r--  gxp-platform.c                       |  294
-rw-r--r--  gxp-pm.c                             |  134
-rw-r--r--  gxp-pm.h                             |   23
-rw-r--r--  gxp-sw-mailbox-driver.c              |  508
-rw-r--r--  gxp-telemetry.c                      |   82
-rw-r--r--  gxp-tmp.h                            |   69
-rw-r--r--  gxp-vd.c                             |  192
-rw-r--r--  gxp-vd.h                             |   28
-rw-r--r--  gxp-wakelock.c                       |   10
-rw-r--r--  gxp.h                                |   27
-rw-r--r--  include/soc/google/exynos_pm_qos.h   |   64
38 files changed, 1515 insertions, 2350 deletions
diff --git a/Makefile b/Makefile
index e4d2063..3b7a340 100644
--- a/Makefile
+++ b/Makefile
@@ -12,8 +12,10 @@ gxp-objs += \
gxp-debugfs.o \
gxp-dmabuf.o \
gxp-doorbell.o \
+ gxp-eventfd.o \
gxp-firmware.o \
gxp-firmware-data.o \
+ gxp-hw-mailbox-driver.o \
gxp-lpm.o \
gxp-mailbox.o \
gxp-mapping.o \
@@ -52,29 +54,12 @@ ifdef CONFIG_GXP_TEST
GXP_PLATFORM = CLOUDRIPPER
endif
-# Default to using the HW mailbox and SysMMU
-GXP_SW_MAILBOX ?= 0
-GXP_HAS_SYSMMU ?= 1
-
-# Setup the linked mailbox implementation and definitions.
-ifeq ($(GXP_SW_MAILBOX),1)
- ccflags-y += -DCONFIG_GXP_USE_SW_MAILBOX
- gxp-objs += gxp-sw-mailbox-driver.o
-else
- gxp-objs += gxp-hw-mailbox-driver.o
-endif
-
# Setup which version of the gxp-dma interface is used.
-ifeq ($(GXP_HAS_SYSMMU),1)
- ccflags-y += -DCONFIG_GXP_HAS_SYSMMU
- # For gem5, need to adopt dma interface without aux domain.
- ifeq ($(GXP_PLATFORM), GEM5)
- gxp-objs += gxp-dma-iommu-gem5.o
- else
- gxp-objs += gxp-dma-iommu.o
- endif
+# For gem5, need to adopt dma interface without aux domain.
+ifeq ($(GXP_PLATFORM), GEM5)
+ gxp-objs += gxp-dma-iommu-gem5.o
else
- gxp-objs += gxp-dma-rmem.o
+ gxp-objs += gxp-dma-iommu.o
endif
ccflags-y += -DCONFIG_GXP_$(GXP_PLATFORM)
diff --git a/gxp-bpm.c b/gxp-bpm.c
index c440bf5..50a41f1 100644
--- a/gxp-bpm.c
+++ b/gxp-bpm.c
@@ -18,28 +18,31 @@
#define BPM_CNTR_CONFIG_OFFSET 0x18
#define BPM_SNAPSHOT_CNTR_OFFSET 0x98
+#define BPM_DISABLE 0x0
+#define BPM_ENABLE 0x1
+
void gxp_bpm_configure(struct gxp_dev *gxp, u8 core, u32 bpm_offset, u32 event)
{
- u32 val =
- ((event & BPM_EVENT_TYPE_MASK) << BPM_EVENT_TYPE_BIT) | ENABLE;
+ u32 val = ((event & BPM_EVENT_TYPE_MASK) << BPM_EVENT_TYPE_BIT) |
+ BPM_ENABLE;
u32 bpm_base = GXP_REG_INST_BPM + bpm_offset;
/* Configure event */
gxp_write_32_core(gxp, core, bpm_base + BPM_CNTR_CONFIG_OFFSET, val);
/* Arm counter */
- gxp_write_32_core(gxp, core, bpm_base + BPM_CONFIG_OFFSET, ENABLE);
+ gxp_write_32_core(gxp, core, bpm_base + BPM_CONFIG_OFFSET, BPM_ENABLE);
}
void gxp_bpm_start(struct gxp_dev *gxp, u8 core)
{
gxp_write_32_core(gxp, core, GXP_REG_PROFILING_CONDITION,
- ENABLE << BPM_START_BIT);
+ BPM_ENABLE << BPM_START_BIT);
}
void gxp_bpm_stop(struct gxp_dev *gxp, u8 core)
{
gxp_write_32_core(gxp, core, GXP_REG_PROFILING_CONDITION,
- ENABLE << BPM_STOP_BIT);
+ BPM_ENABLE << BPM_STOP_BIT);
}
u32 gxp_bpm_read_counter(struct gxp_dev *gxp, u8 core, u32 bpm_offset)
@@ -47,7 +50,7 @@ u32 gxp_bpm_read_counter(struct gxp_dev *gxp, u8 core, u32 bpm_offset)
u32 bpm_base = GXP_REG_INST_BPM + bpm_offset;
/* Disarm counter */
- gxp_write_32_core(gxp, core, bpm_base + BPM_CONFIG_OFFSET, DISABLE);
+ gxp_write_32_core(gxp, core, bpm_base + BPM_CONFIG_OFFSET, BPM_DISABLE);
/* Read final counter value */
return gxp_read_32_core(gxp, core, bpm_base + BPM_SNAPSHOT_CNTR_OFFSET);
}
diff --git a/gxp-client.c b/gxp-client.c
index 7ff4b5c..5838d82 100644
--- a/gxp-client.c
+++ b/gxp-client.c
@@ -19,13 +19,14 @@ struct gxp_client *gxp_client_create(struct gxp_dev *gxp)
{
struct gxp_client *client;
- client = kmalloc(sizeof(*client), GFP_KERNEL);
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return ERR_PTR(-ENOMEM);
client->gxp = gxp;
init_rwsem(&client->semaphore);
client->has_block_wakelock = false;
+ client->has_vd_wakelock = false;
client->requested_power_state = AUR_OFF;
client->requested_memory_power_state = 0;
client->vd = NULL;
@@ -46,7 +47,7 @@ void gxp_client_destroy(struct gxp_client *client)
* Unmap TPU buffers, if the mapping is already removed, this
* is a no-op.
*/
- gxp_dma_unmap_tpu_buffer(gxp, client->mbx_desc);
+ gxp_dma_unmap_tpu_buffer(gxp, client->vd, client->mbx_desc);
#endif // CONFIG_ANDROID && !CONFIG_GXP_GEM5
if (client->has_vd_wakelock)
@@ -54,7 +55,7 @@ void gxp_client_destroy(struct gxp_client *client)
for (core = 0; core < GXP_NUM_CORES; core++) {
if (client->mb_eventfds[core])
- eventfd_ctx_put(client->mb_eventfds[core]);
+ gxp_eventfd_put(client->mb_eventfds[core]);
}
up_write(&gxp->vd_semaphore);
@@ -69,29 +70,8 @@ void gxp_client_destroy(struct gxp_client *client)
AUR_MEM_UNDEFINED);
}
- gxp_vd_release(client->vd);
+ if (client->vd)
+ gxp_vd_release(client->vd);
kfree(client);
}
-
-void gxp_client_signal_mailbox_eventfd(struct gxp_client *client,
- uint phys_core)
-{
- int virtual_core;
-
- down_read(&client->semaphore);
-
- virtual_core = gxp_vd_phys_core_to_virt_core(client->vd, phys_core);
- if (unlikely(virtual_core < 0)) {
- dev_err(client->gxp->dev,
- "%s: core %d is not part of client's virtual device.\n",
- __func__, phys_core);
- goto out;
- }
-
- if (client->mb_eventfds[virtual_core])
- eventfd_signal(client->mb_eventfds[virtual_core], 1);
-
-out:
- up_read(&client->semaphore);
-}
diff --git a/gxp-client.h b/gxp-client.h
index 4f6fe8e..3b0719b 100644
--- a/gxp-client.h
+++ b/gxp-client.h
@@ -7,11 +7,11 @@
#ifndef __GXP_CLIENT_H__
#define __GXP_CLIENT_H__
-#include <linux/eventfd.h>
#include <linux/rwsem.h>
#include <linux/types.h>
#include "gxp-internal.h"
+#include "gxp-eventfd.h"
#include "gxp-vd.h"
/* Holds state belonging to a client */
@@ -37,7 +37,7 @@ struct gxp_client {
bool tpu_mbx_allocated;
struct gxp_tpu_mbx_desc mbx_desc;
- struct eventfd_ctx *mb_eventfds[GXP_NUM_CORES];
+ struct gxp_eventfd *mb_eventfds[GXP_NUM_CORES];
};
/*
@@ -51,7 +51,4 @@ struct gxp_client *gxp_client_create(struct gxp_dev *gxp);
*/
void gxp_client_destroy(struct gxp_client *client);
-void gxp_client_signal_mailbox_eventfd(struct gxp_client *client,
- uint phys_core);
-
#endif /* __GXP_CLIENT_H__ */
diff --git a/gxp-config.h b/gxp-config.h
index d17b071..2899288 100644
--- a/gxp-config.h
+++ b/gxp-config.h
@@ -21,6 +21,10 @@
#define GXP_TIME_DELAY_FACTOR 1
#endif
+#define DOORBELL_COUNT 32
+
+#define SYNC_BARRIER_COUNT 16
+
#include "gxp-csrs.h"
/* Core address space starts at Inst_BPM block */
diff --git a/gxp-csrs.h b/gxp-csrs.h
index 662837f..739e41f 100644
--- a/gxp-csrs.h
+++ b/gxp-csrs.h
@@ -57,9 +57,6 @@ enum gxp_core_csrs {
GXP_REG_ETM_PWRCTL = 0xB020,
};
-#define DOORBELL_COUNT 32
-
-#define SYNC_BARRIER_COUNT 16
#define SYNC_BARRIER_SHADOW_OFFSET 0x800
#define CORE_PD_BASE(_x_) ((_x_) << 2)
diff --git a/gxp-debug-dump.c b/gxp-debug-dump.c
index bbc4635..d8fd973 100644
--- a/gxp-debug-dump.c
+++ b/gxp-debug-dump.c
@@ -21,12 +21,14 @@
#include "gxp-doorbell.h"
#include "gxp-internal.h"
#include "gxp-lpm.h"
-#include "gxp-tmp.h"
#define GXP_COREDUMP_PENDING 0xF
#define KERNEL_INIT_DUMP_TIMEOUT (10000 * GXP_TIME_DELAY_FACTOR)
#define SSCD_MSG_LENGTH 64
+#define SYNC_BARRIER_BLOCK 0x00100000
+#define SYNC_BARRIER_BASE(_x_) ((_x_) << 12)
+
/* Enum indicating the debug dump request reason. */
enum gxp_debug_dump_init_type {
DEBUG_DUMP_FW_INIT,
@@ -50,6 +52,22 @@ static void gxp_debug_dump_cache_flush(struct gxp_dev *gxp)
return;
}
+static u32 gxp_read_sync_barrier_shadow(struct gxp_dev *gxp, uint index)
+{
+ uint barrier_reg_offset;
+
+ if (index >= SYNC_BARRIER_COUNT) {
+ dev_err(gxp->dev,
+ "Attempt to read non-existent sync barrier: %0u\n",
+ index);
+ return 0;
+ }
+
+ barrier_reg_offset = SYNC_BARRIER_BLOCK + SYNC_BARRIER_BASE(index) +
+ SYNC_BARRIER_SHADOW_OFFSET;
+
+ return gxp_read_32(gxp, barrier_reg_offset);
+}
static void
gxp_get_common_registers(struct gxp_dev *gxp, struct gxp_seg_header *seg_header,
diff --git a/gxp-dma-iommu-gem5.c b/gxp-dma-iommu-gem5.c
index 321b560..86a2c06 100644
--- a/gxp-dma-iommu-gem5.c
+++ b/gxp-dma-iommu-gem5.c
@@ -106,11 +106,25 @@ void gxp_dma_exit(struct gxp_dev *gxp)
/* Offset from mailbox base to the device interface that needs to be mapped */
#define MAILBOX_DEVICE_INTERFACE_OFFSET 0x10000
-int gxp_dma_map_resources(struct gxp_dev *gxp)
+void gxp_dma_init_default_resources(struct gxp_dev *gxp)
+{
+ unsigned int core;
+
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ gxp->mbx[core].daddr = GXP_IOVA_MAILBOX(core);
+ gxp->fwbufs[core].daddr = GXP_IOVA_FIRMWARE(core);
+ }
+ gxp->regs.daddr = GXP_IOVA_AURORA_TOP;
+ gxp->coredumpbuf.daddr = GXP_IOVA_CORE_DUMP;
+ gxp->fwdatabuf.daddr = GXP_IOVA_FW_DATA;
+}
+
+int gxp_dma_map_core_resources(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint virt_core,
+ uint core)
{
struct gxp_dma_iommu_manager *mgr = container_of(
gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- unsigned int core;
int ret = 0;
ret = iommu_map(mgr->default_domain, GXP_IOVA_AURORA_TOP,
@@ -147,30 +161,23 @@ int gxp_dma_map_resources(struct gxp_dev *gxp)
IOMMU_READ | IOMMU_WRITE);
if (ret)
goto err;
- for (core = 0; core < GXP_NUM_CORES; core++) {
- ret = iommu_map(mgr->default_domain, GXP_IOVA_MAILBOX(core),
- gxp->mbx[core].paddr +
- MAILBOX_DEVICE_INTERFACE_OFFSET,
- gxp->mbx[core].size, IOMMU_READ | IOMMU_WRITE);
+ ret = iommu_map(mgr->default_domain, GXP_IOVA_MAILBOX(core),
+ gxp->mbx[core].paddr +
+ MAILBOX_DEVICE_INTERFACE_OFFSET,
+ gxp->mbx[core].size, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ /* Only map the TPU mailboxes if they were found on probe */
+ if (gxp->tpu_dev.mbx_paddr) {
+ ret = iommu_map(
+ mgr->default_domain,
+ GXP_IOVA_EXT_TPU_MBX + core * EXT_TPU_MBX_SIZE,
+ gxp->tpu_dev.mbx_paddr +
+ core * EXT_TPU_MBX_SIZE,
+ EXT_TPU_MBX_SIZE, IOMMU_READ | IOMMU_WRITE);
if (ret)
goto err;
- /* Only map the TPU mailboxes if they were found on probe */
- if (gxp->tpu_dev.mbx_paddr) {
- ret = iommu_map(
- mgr->default_domain,
- GXP_IOVA_EXT_TPU_MBX + core * EXT_TPU_MBX_SIZE,
- gxp->tpu_dev.mbx_paddr +
- core * EXT_TPU_MBX_SIZE,
- EXT_TPU_MBX_SIZE, IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
- }
- gxp->mbx[core].daddr = GXP_IOVA_MAILBOX(core);
- gxp->fwbufs[core].daddr = GXP_IOVA_FIRMWARE(core);
}
- gxp->regs.daddr = GXP_IOVA_AURORA_TOP;
- gxp->coredumpbuf.daddr = GXP_IOVA_CORE_DUMP;
- gxp->fwdatabuf.daddr = GXP_IOVA_FW_DATA;
return ret;
@@ -180,15 +187,16 @@ err:
* Any resource that hadn't been mapped yet will cause `iommu_unmap()`
* to return immediately, so its safe to try to unmap everything.
*/
- gxp_dma_unmap_resources(gxp);
+ gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
return ret;
}
-void gxp_dma_unmap_resources(struct gxp_dev *gxp)
+void gxp_dma_unmap_core_resources(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint virt_core,
+ uint core)
{
struct gxp_dma_iommu_manager *mgr = container_of(
gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- unsigned int core;
iommu_unmap(mgr->default_domain, GXP_IOVA_AURORA_TOP, gxp->regs.size);
iommu_unmap(mgr->default_domain, GXP_IOVA_SYNC_BARRIERS,
@@ -203,16 +211,14 @@ void gxp_dma_unmap_resources(struct gxp_dev *gxp)
iommu_unmap(mgr->default_domain, GXP_IOVA_CORE_DUMP,
gxp->coredumpbuf.size);
iommu_unmap(mgr->default_domain, GXP_IOVA_FW_DATA, gxp->fwdatabuf.size);
- for (core = 0; core < GXP_NUM_CORES; core++) {
- iommu_unmap(mgr->default_domain, GXP_IOVA_MAILBOX(core),
- gxp->mbx[core].size);
- /* Only unmap the TPU mailboxes if they were found on probe */
- if (gxp->tpu_dev.mbx_paddr) {
- iommu_unmap(mgr->default_domain,
- GXP_IOVA_EXT_TPU_MBX +
- core * EXT_TPU_MBX_SIZE,
- EXT_TPU_MBX_SIZE);
- }
+ iommu_unmap(mgr->default_domain, GXP_IOVA_MAILBOX(core),
+ gxp->mbx[core].size);
+ /* Only unmap the TPU mailboxes if they were found on probe */
+ if (gxp->tpu_dev.mbx_paddr) {
+ iommu_unmap(mgr->default_domain,
+ GXP_IOVA_EXT_TPU_MBX +
+ core * EXT_TPU_MBX_SIZE,
+ EXT_TPU_MBX_SIZE);
}
}
@@ -285,21 +291,24 @@ static inline struct sg_table *alloc_sgt_for_buffer(void *ptr, size_t size,
}
#if IS_ENABLED(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_GXP_GEM5)
-int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, uint core_list,
+int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, uint core_list,
struct edgetpu_ext_mailbox_info *mbx_info)
{
struct gxp_dma_iommu_manager *mgr = container_of(
gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- uint orig_core_list = core_list;
+ uint orig_virt_core_list = virt_core_list;
u64 queue_iova;
int core;
int ret;
int i = 0;
- while (core_list) {
+ while (virt_core_list) {
phys_addr_t cmdq_pa = mbx_info->mailboxes[i].cmdq_pa;
phys_addr_t respq_pa = mbx_info->mailboxes[i++].respq_pa;
+ virt_core = ffs(virt_core_list) - 1;
+ virt_core_list &= ~BIT(virt_core);
core = ffs(core_list) - 1;
core_list &= ~BIT(core);
queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
@@ -319,8 +328,10 @@ int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, uint core_list,
return 0;
error:
- core_list ^= orig_core_list;
- while (core_list) {
+ virt_core_list ^= orig_virt_core_list;
+ while (virt_core_list) {
+ virt_core = ffs(virt_core_list) - 1;
+ virt_core_list &= ~BIT(virt_core);
core = ffs(core_list) - 1;
core_list &= ~BIT(core);
queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
@@ -334,15 +345,20 @@ error:
}
void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
struct gxp_tpu_mbx_desc mbx_desc)
{
struct gxp_dma_iommu_manager *mgr = container_of(
gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
+ uint virt_core_list = mbx_desc.virt_core_list;
uint core_list = mbx_desc.phys_core_list;
u64 queue_iova;
int core;
+ uint virt_core;
- while (core_list) {
+ while (virt_core_list) {
+ virt_core = ffs(virt_core_list) - 1;
+ virt_core_list &= ~BIT(virt_core);
core = ffs(core_list) - 1;
core_list &= ~BIT(core);
queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
@@ -355,7 +371,40 @@ void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
}
#endif // CONFIG_ANDROID && !CONFIG_GXP_GEM5
-void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
+int gxp_dma_domain_attach_device(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint virt_core,
+ uint core)
+{
+ /* NO-OP when aux domains are not supported */
+ return 0;
+}
+
+void gxp_dma_domain_detach_device(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint virt_core)
+{
+ /* NO-OP when aux domains are not supported */
+}
+
+int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp, void *buf,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list, size_t size,
+ dma_addr_t dma_handle,
+ uint gxp_dma_flags)
+{
+ /* NO-OP when aux domains are not supported */
+ return 0;
+}
+
+void gxp_dma_unmap_allocated_coherent_buffer(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list, size_t size,
+ dma_addr_t dma_handle)
+{
+ /* NO-OP when aux domains are not supported */
+}
+
+void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
uint gxp_dma_flags)
{
@@ -380,16 +429,18 @@ void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
return buf;
}
-void gxp_dma_free_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
+void gxp_dma_free_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle)
{
size = size < PAGE_SIZE ? PAGE_SIZE : size;
dma_free_coherent(gxp->dev, size, cpu_addr, dma_handle);
}
-dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp, uint core_list,
- void *cpu_addr, size_t size,
+dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list, void *cpu_addr, size_t size,
enum dma_data_direction direction,
unsigned long attrs, uint gxp_dma_flags)
{
@@ -403,17 +454,18 @@ dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp, uint core_list,
return daddr;
}
-void gxp_dma_unmap_single(struct gxp_dev *gxp, uint core_list,
- dma_addr_t dma_addr, size_t size,
+void gxp_dma_unmap_single(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction,
unsigned long attrs)
{
dma_unmap_single_attrs(gxp->dev, dma_addr, size, direction, attrs);
}
-dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, uint core_list,
- struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction,
+dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction,
unsigned long attrs, uint gxp_dma_flags)
{
dma_addr_t daddr;
@@ -426,16 +478,17 @@ dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, uint core_list,
return daddr;
}
-void gxp_dma_unmap_page(struct gxp_dev *gxp, uint core_list,
- dma_addr_t dma_addr, size_t size,
+void gxp_dma_unmap_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction, unsigned long attrs)
{
dma_unmap_page_attrs(gxp->dev, dma_addr, size, direction, attrs);
}
-dma_addr_t gxp_dma_map_resource(struct gxp_dev *gxp, uint core_list,
- phys_addr_t phys_addr, size_t size,
- enum dma_data_direction direction,
+dma_addr_t gxp_dma_map_resource(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list, phys_addr_t phys_addr,
+ size_t size, enum dma_data_direction direction,
unsigned long attrs, uint gxp_dma_flags)
{
dma_addr_t daddr;
@@ -447,23 +500,24 @@ dma_addr_t gxp_dma_map_resource(struct gxp_dev *gxp, uint core_list,
return daddr;
}
-void gxp_dma_unmap_resource(struct gxp_dev *gxp, uint core_list,
- dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction,
+void gxp_dma_unmap_resource(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction,
unsigned long attrs)
{
dma_unmap_resource(gxp->dev, dma_addr, size, direction, attrs);
}
-int gxp_dma_map_sg(struct gxp_dev *gxp, uint core_list, struct scatterlist *sg,
- int nents, enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags)
+int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ int virt_core_list, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction, unsigned long attrs,
+ uint gxp_dma_flags)
{
return dma_map_sg_attrs(gxp->dev, sg, nents, direction, attrs);
}
-void gxp_dma_unmap_sg(struct gxp_dev *gxp, uint core_list,
- struct scatterlist *sg, int nents,
+void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, struct scatterlist *sg, int nents,
enum dma_data_direction direction, unsigned long attrs)
{
dma_unmap_sg_attrs(gxp->dev, sg, nents, direction, attrs);
@@ -498,15 +552,17 @@ void gxp_dma_sync_sg_for_device(struct gxp_dev *gxp, struct scatterlist *sg,
dma_sync_sg_for_device(gxp->dev, sg, nents, direction);
}
-struct sg_table *
-gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
- struct dma_buf_attachment *attachment,
- enum dma_data_direction direction)
+struct sg_table *gxp_dma_map_dmabuf_attachment(
+ struct gxp_dev *gxp, struct gxp_virtual_device *vd, uint virt_core_list,
+ struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
{
return dma_buf_map_attachment(attachment, direction);
}
-void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
+void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list,
struct dma_buf_attachment *attachment,
struct sg_table *sgt,
enum dma_data_direction direction)
diff --git a/gxp-dma-iommu.c b/gxp-dma-iommu.c
index 77b9d31..caedac3 100644
--- a/gxp-dma-iommu.c
+++ b/gxp-dma-iommu.c
@@ -13,16 +13,14 @@
#include "gxp-config.h"
#include "gxp-dma.h"
-#include "gxp-dma-iommu.h"
#include "gxp-iova.h"
#include "gxp-mapping.h"
#include "gxp-pm.h"
+#include "gxp-vd.h"
struct gxp_dma_iommu_manager {
struct gxp_dma_manager dma_mgr;
struct iommu_domain *default_domain;
- struct iommu_domain *core_domains[GXP_NUM_CORES];
- int core_vids[GXP_NUM_CORES];
void __iomem *idma_ssmt_base;
void __iomem *inst_data_ssmt_base;
};
@@ -71,26 +69,26 @@ static inline void ssmt_set_vid_for_sid(void __iomem *ssmt, int vid, u8 sid)
writel(vid, (ssmt) + 0x1200u + (0x4u * (sid)));
}
-int gxp_dma_ssmt_program(struct gxp_dev *gxp)
+static int gxp_dma_ssmt_program(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint virt_core,
+ uint core)
{
/* SSMT is not supported in unittests */
#ifndef CONFIG_GXP_TEST
struct gxp_dma_iommu_manager *mgr = container_of(
gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- unsigned int core;
-
- for (core = 0; core < GXP_NUM_CORES; core++) {
- ssmt_set_vid_for_sid(mgr->idma_ssmt_base, mgr->core_vids[core],
- IDMA_SID_FOR_CORE(core));
- ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base,
- mgr->core_vids[core],
- INST_SID_FOR_CORE(core));
- ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base,
- mgr->core_vids[core],
- DATA_SID_FOR_CORE(core));
- }
+ int core_vid;
+
+ core_vid = iommu_aux_get_pasid(vd->core_domains[virt_core], gxp->dev);
+ dev_dbg(gxp->dev, "SysMMU: core%u assigned vid %d\n", core,
+ core_vid);
+ ssmt_set_vid_for_sid(mgr->idma_ssmt_base, core_vid,
+ IDMA_SID_FOR_CORE(core));
+ ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base, core_vid,
+ INST_SID_FOR_CORE(core));
+ ssmt_set_vid_for_sid(mgr->inst_data_ssmt_base, core_vid,
+ DATA_SID_FOR_CORE(core));
#endif
-
return 0;
}
@@ -176,7 +174,6 @@ static int sysmmu_fault_handler(struct iommu_fault *fault, void *token)
int gxp_dma_init(struct gxp_dev *gxp)
{
struct gxp_dma_iommu_manager *mgr;
- unsigned int core;
int ret;
/* GXP can only address 32-bit IOVAs */
@@ -218,30 +215,10 @@ int gxp_dma_init(struct gxp_dev *gxp)
goto err_unreg_fault_handler;
}
- for (core = 0; core < GXP_NUM_CORES; core++) {
- mgr->core_domains[core] = iommu_domain_alloc(gxp->dev->bus);
- if (iommu_aux_attach_device(mgr->core_domains[core],
- gxp->dev)) {
- iommu_domain_free(mgr->core_domains[core]);
- goto err_detach_aux_domains;
- }
- mgr->core_vids[core] =
- iommu_aux_get_pasid(mgr->core_domains[core], gxp->dev);
- dev_notice(gxp->dev, "SysMMU: core%u assigned vid %d\n", core,
- mgr->core_vids[core]);
- }
-
gxp->dma_mgr = &(mgr->dma_mgr);
return 0;
-err_detach_aux_domains:
- /* Detach and free any aux domains successfully setup */
- for (core -= 1; core >= 0; core--) {
- iommu_aux_detach_device(mgr->core_domains[core], gxp->dev);
- iommu_domain_free(mgr->core_domains[core]);
- }
-
err_unreg_fault_handler:
if (iommu_unregister_device_fault_handler(gxp->dev))
dev_err(gxp->dev,
@@ -252,24 +229,6 @@ err_unreg_fault_handler:
void gxp_dma_exit(struct gxp_dev *gxp)
{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- unsigned int core;
-
- /*
- * The SysMMU driver writes registers in the SysMMU during
- * `iommu_aux_detach_device()`, to disable that domain's VID and flush
- * its TLB. BLK_AUR must be powered on for these writes to succeed.
- */
- gxp_pm_blk_on(gxp);
-
- for (core = 0; core < GXP_NUM_CORES; core++) {
- iommu_aux_detach_device(mgr->core_domains[core], gxp->dev);
- iommu_domain_free(mgr->core_domains[core]);
- }
-
- gxp_pm_blk_off(gxp);
-
if (iommu_unregister_device_fault_handler(gxp->dev))
dev_err(gxp->dev,
"Failed to unregister SysMMU fault handler\n");
@@ -282,72 +241,95 @@ void gxp_dma_exit(struct gxp_dev *gxp)
/* Offset from mailbox base to the device interface that needs to be mapped */
#define MAILBOX_DEVICE_INTERFACE_OFFSET 0x10000
-int gxp_dma_map_resources(struct gxp_dev *gxp)
+void gxp_dma_init_default_resources(struct gxp_dev *gxp)
{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
unsigned int core;
- int ret = 0;
for (core = 0; core < GXP_NUM_CORES; core++) {
- ret = iommu_map(mgr->core_domains[core], GXP_IOVA_AURORA_TOP,
- gxp->regs.paddr, gxp->regs.size,
- IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
- /*
- * Firmware expects to access the sync barriers at a separate
- * address, lower than the rest of the AURORA_TOP registers.
- */
- ret = iommu_map(mgr->core_domains[core], GXP_IOVA_SYNC_BARRIERS,
- gxp->regs.paddr + SYNC_BARRIERS_TOP_OFFSET,
- SYNC_BARRIERS_SIZE, IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
- ret = iommu_map(mgr->core_domains[core], GXP_IOVA_MAILBOX(core),
- gxp->mbx[core].paddr +
- MAILBOX_DEVICE_INTERFACE_OFFSET,
- gxp->mbx[core].size, IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
- /*
- * TODO(b/202213606): Map FW regions of all cores in a VD for
- * each other at VD creation.
- */
- ret = iommu_map(mgr->core_domains[core], GXP_IOVA_FIRMWARE(0),
- gxp->fwbufs[0].paddr,
- gxp->fwbufs[0].size * GXP_NUM_CORES,
- IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
- ret = iommu_map(mgr->core_domains[core], GXP_IOVA_CORE_DUMP,
- gxp->coredumpbuf.paddr, gxp->coredumpbuf.size,
- IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
- ret = iommu_map(mgr->core_domains[core], GXP_IOVA_FW_DATA,
- gxp->fwdatabuf.paddr, gxp->fwdatabuf.size,
- IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
- /* Only map the TPU mailboxes if they were found on probe */
- if (gxp->tpu_dev.mbx_paddr) {
- ret = iommu_map(
- mgr->core_domains[core],
- GXP_IOVA_EXT_TPU_MBX + core * EXT_TPU_MBX_SIZE,
- gxp->tpu_dev.mbx_paddr +
- core * EXT_TPU_MBX_SIZE,
- EXT_TPU_MBX_SIZE, IOMMU_READ | IOMMU_WRITE);
- if (ret)
- goto err;
- }
gxp->mbx[core].daddr = GXP_IOVA_MAILBOX(core);
gxp->fwbufs[core].daddr = GXP_IOVA_FIRMWARE(core);
}
gxp->regs.daddr = GXP_IOVA_AURORA_TOP;
gxp->coredumpbuf.daddr = GXP_IOVA_CORE_DUMP;
gxp->fwdatabuf.daddr = GXP_IOVA_FW_DATA;
+}
+
+int gxp_dma_domain_attach_device(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint virt_core,
+ uint core)
+{
+ int ret;
+
+ ret = iommu_aux_attach_device(vd->core_domains[virt_core], gxp->dev);
+ if (ret)
+ goto out;
+ gxp_dma_ssmt_program(gxp, vd, virt_core, core);
+out:
+ return ret;
+}
+
+void gxp_dma_domain_detach_device(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint virt_core)
+{
+ iommu_aux_detach_device(vd->core_domains[virt_core], gxp->dev);
+}
+int gxp_dma_map_core_resources(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint virt_core,
+ uint core)
+{
+ int ret;
+
+ ret = iommu_map(vd->core_domains[virt_core], gxp->regs.daddr,
+ gxp->regs.paddr, gxp->regs.size,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ /*
+ * Firmware expects to access the sync barriers at a separate
+ * address, lower than the rest of the AURORA_TOP registers.
+ */
+ ret = iommu_map(vd->core_domains[virt_core], GXP_IOVA_SYNC_BARRIERS,
+ gxp->regs.paddr + SYNC_BARRIERS_TOP_OFFSET,
+ SYNC_BARRIERS_SIZE, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ ret = iommu_map(vd->core_domains[virt_core], gxp->mbx[core].daddr,
+ gxp->mbx[core].paddr + MAILBOX_DEVICE_INTERFACE_OFFSET,
+ gxp->mbx[core].size, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ /*
+ * TODO(b/202213606): Map FW regions of all cores in a VD for
+ * each other at VD creation.
+ */
+ ret = iommu_map(vd->core_domains[virt_core], gxp->fwbufs[0].daddr,
+ gxp->fwbufs[0].paddr,
+ gxp->fwbufs[0].size * GXP_NUM_CORES,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ ret = iommu_map(vd->core_domains[virt_core], gxp->coredumpbuf.daddr,
+ gxp->coredumpbuf.paddr, gxp->coredumpbuf.size,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ ret = iommu_map(vd->core_domains[virt_core], gxp->fwdatabuf.daddr,
+ gxp->fwdatabuf.paddr, gxp->fwdatabuf.size,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ /* Only map the TPU mailboxes if they were found on probe */
+ if (gxp->tpu_dev.mbx_paddr) {
+ ret = iommu_map(
+ vd->core_domains[virt_core],
+ GXP_IOVA_EXT_TPU_MBX + core * EXT_TPU_MBX_SIZE,
+ gxp->tpu_dev.mbx_paddr +
+ core * EXT_TPU_MBX_SIZE,
+ EXT_TPU_MBX_SIZE, IOMMU_READ | IOMMU_WRITE);
+ if (ret)
+ goto err;
+ }
return ret;
err:
@@ -356,42 +338,38 @@ err:
* Any resource that hadn't been mapped yet will cause `iommu_unmap()`
* to return immediately, so its safe to try to unmap everything.
*/
- gxp_dma_unmap_resources(gxp);
+ gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
return ret;
}
-void gxp_dma_unmap_resources(struct gxp_dev *gxp)
+void gxp_dma_unmap_core_resources(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint virt_core,
+ uint core)
{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- unsigned int core;
-
- for (core = 0; core < GXP_NUM_CORES; core++) {
- iommu_unmap(mgr->core_domains[core], GXP_IOVA_AURORA_TOP,
- gxp->regs.size);
- iommu_unmap(mgr->core_domains[core], GXP_IOVA_SYNC_BARRIERS,
- SYNC_BARRIERS_SIZE);
- iommu_unmap(mgr->core_domains[core], GXP_IOVA_MAILBOX(core),
- gxp->mbx[core].size);
- /*
- * TODO(b/202213606): A core should only have access to the FW
- * of other cores if they're in the same VD, and have the FW
- * region unmapped on VD destruction.
- */
- iommu_unmap(mgr->core_domains[core], GXP_IOVA_FIRMWARE(0),
- gxp->fwbufs[0].size * GXP_NUM_CORES);
- iommu_unmap(mgr->core_domains[core], GXP_IOVA_CORE_DUMP,
- gxp->coredumpbuf.size);
- iommu_unmap(mgr->core_domains[core], GXP_IOVA_FW_DATA,
- gxp->fwdatabuf.size);
- /* Only unmap the TPU mailboxes if they were found on probe */
- if (gxp->tpu_dev.mbx_paddr) {
- iommu_unmap(mgr->core_domains[core],
- GXP_IOVA_EXT_TPU_MBX +
- core * EXT_TPU_MBX_SIZE,
- EXT_TPU_MBX_SIZE);
- }
- }
+ /* Only unmap the TPU mailboxes if they were found on probe */
+ if (gxp->tpu_dev.mbx_paddr) {
+ iommu_unmap(vd->core_domains[virt_core],
+ GXP_IOVA_EXT_TPU_MBX +
+ core * EXT_TPU_MBX_SIZE,
+ EXT_TPU_MBX_SIZE);
+ }
+ iommu_unmap(vd->core_domains[virt_core], gxp->fwdatabuf.daddr,
+ gxp->fwdatabuf.size);
+ iommu_unmap(vd->core_domains[virt_core], gxp->coredumpbuf.daddr,
+ gxp->coredumpbuf.size);
+ /*
+ * TODO(b/202213606): A core should only have access to the FW
+ * of other cores if they're in the same VD, and have the FW
+ * region unmapped on VD destruction.
+ */
+ iommu_unmap(vd->core_domains[virt_core], gxp->fwbufs[0].daddr,
+ gxp->fwbufs[0].size * GXP_NUM_CORES);
+ iommu_unmap(vd->core_domains[virt_core], gxp->mbx[core].daddr,
+ gxp->mbx[core].size);
+ iommu_unmap(vd->core_domains[virt_core], GXP_IOVA_SYNC_BARRIERS,
+ SYNC_BARRIERS_SIZE);
+ iommu_unmap(vd->core_domains[virt_core], gxp->regs.daddr,
+ gxp->regs.size);
}
static inline struct sg_table *
@@ -464,33 +442,35 @@ alloc_sgt_for_buffer(void *ptr, size_t size,
}
#if IS_ENABLED(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_GXP_GEM5)
-int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, uint core_list,
+int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, uint core_list,
struct edgetpu_ext_mailbox_info *mbx_info)
{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- uint orig_core_list = core_list;
+ uint orig_virt_core_list = virt_core_list;
u64 queue_iova;
+ uint virt_core;
int core;
int ret;
int i = 0;
- while (core_list) {
+ while (virt_core_list) {
phys_addr_t cmdq_pa = mbx_info->mailboxes[i].cmdq_pa;
phys_addr_t respq_pa = mbx_info->mailboxes[i++].respq_pa;
+ virt_core = ffs(virt_core_list) - 1;
+ virt_core_list &= ~BIT(virt_core);
core = ffs(core_list) - 1;
core_list &= ~BIT(core);
queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
- ret = iommu_map(mgr->core_domains[core], queue_iova,
+ ret = iommu_map(vd->core_domains[virt_core], queue_iova,
cmdq_pa, mbx_info->cmdq_size, IOMMU_WRITE);
if (ret)
goto error;
- ret = iommu_map(mgr->core_domains[core],
+ ret = iommu_map(vd->core_domains[virt_core],
queue_iova + mbx_info->cmdq_size, respq_pa,
mbx_info->respq_size, IOMMU_READ);
if (ret) {
- iommu_unmap(mgr->core_domains[core], queue_iova,
+ iommu_unmap(vd->core_domains[virt_core], queue_iova,
mbx_info->cmdq_size);
goto error;
}
@@ -498,129 +478,160 @@ int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, uint core_list,
return 0;
error:
- core_list ^= orig_core_list;
- while (core_list) {
+ virt_core_list ^= orig_virt_core_list;
+ while (virt_core_list) {
+ virt_core = ffs(virt_core_list) - 1;
+ virt_core_list &= ~BIT(virt_core);
core = ffs(core_list) - 1;
core_list &= ~BIT(core);
queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
- iommu_unmap(mgr->core_domains[core], queue_iova,
+ iommu_unmap(vd->core_domains[virt_core], queue_iova,
mbx_info->cmdq_size);
- iommu_unmap(mgr->core_domains[core], queue_iova +
+ iommu_unmap(vd->core_domains[virt_core], queue_iova +
mbx_info->cmdq_size, mbx_info->respq_size);
}
return ret;
}
void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
struct gxp_tpu_mbx_desc mbx_desc)
{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
+ uint virt_core_list = mbx_desc.virt_core_list;
uint core_list = mbx_desc.phys_core_list;
u64 queue_iova;
int core;
+ uint virt_core;
- while (core_list) {
+ while (virt_core_list) {
+ virt_core = ffs(virt_core_list) - 1;
+ virt_core_list &= ~BIT(virt_core);
core = ffs(core_list) - 1;
core_list &= ~BIT(core);
queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
- iommu_unmap(mgr->core_domains[core], queue_iova,
+ iommu_unmap(vd->core_domains[virt_core], queue_iova,
mbx_desc.cmdq_size);
- iommu_unmap(mgr->core_domains[core], queue_iova +
+ iommu_unmap(vd->core_domains[virt_core], queue_iova +
mbx_desc.cmdq_size, mbx_desc.respq_size);
}
}
#endif // CONFIG_ANDROID && !CONFIG_GXP_GEM5
-void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- uint gxp_dma_flags)
+int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp, void *buf,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list, size_t size,
+ dma_addr_t dma_handle,
+ uint gxp_dma_flags)
{
struct gxp_dma_iommu_manager *mgr = container_of(
gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- void *buf;
struct sg_table *sgt;
- dma_addr_t daddr;
- int core;
+ int virt_core;
ssize_t size_mapped;
size = size < PAGE_SIZE ? PAGE_SIZE : size;
-
- /* Allocate a coherent buffer in the default domain */
- buf = dma_alloc_coherent(gxp->dev, size, &daddr, flag);
- if (!buf) {
- dev_err(gxp->dev, "Failed to allocate coherent buffer\n");
- return NULL;
- }
-
- sgt = alloc_sgt_for_buffer(buf, size, mgr->default_domain, daddr);
+ sgt = alloc_sgt_for_buffer(buf, size, mgr->default_domain, dma_handle);
if (IS_ERR(sgt)) {
dev_err(gxp->dev,
"Failed to allocate sgt for coherent buffer\n");
- dma_free_coherent(gxp->dev, size, buf, daddr);
- return NULL;
+ return -ENOMEM;
}
/* Create identical mappings in the specified cores' domains */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (!(core_list & BIT(core)))
+ for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+ if (!(virt_core_list & BIT(virt_core)))
continue;
-
/*
* In Linux 5.15 and beyond, `iommu_map_sg()` returns a
* `ssize_t` to encode errors that earlier versions throw out.
* Explicitly cast here for backwards compatibility.
*/
- size_mapped =
- (ssize_t)iommu_map_sg(mgr->core_domains[core], daddr,
- sgt->sgl, sgt->orig_nents,
- IOMMU_READ | IOMMU_WRITE);
+ size_mapped = (ssize_t)iommu_map_sg(vd->core_domains[virt_core],
+ dma_handle, sgt->sgl,
+ sgt->orig_nents,
+ IOMMU_READ | IOMMU_WRITE);
if (size_mapped != size)
goto err;
}
- if (dma_handle)
- *dma_handle = daddr;
-
sg_free_table(sgt);
kfree(sgt);
-
- return buf;
+ return 0;
err:
- for (core -= 1; core >= 0; core--)
- iommu_unmap(mgr->core_domains[core], daddr, size);
- dma_free_coherent(gxp->dev, size, buf, daddr);
+ for (virt_core -= 1; virt_core >= 0; virt_core--)
+ iommu_unmap(vd->core_domains[virt_core], dma_handle, size);
sg_free_table(sgt);
kfree(sgt);
+ return -EINVAL;
+}
- return NULL;
+void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ uint gxp_dma_flags)
+{
+ void *buf;
+ dma_addr_t daddr;
+ int ret;
+
+ size = size < PAGE_SIZE ? PAGE_SIZE : size;
+
+ /* Allocate a coherent buffer in the default domain */
+ buf = dma_alloc_coherent(gxp->dev, size, &daddr, flag);
+ if (!buf) {
+ dev_err(gxp->dev, "Failed to allocate coherent buffer\n");
+ return NULL;
+ }
+ if (vd != NULL) {
+ ret = gxp_dma_map_allocated_coherent_buffer(gxp, buf, vd,
+ virt_core_list,
+ size, daddr,
+ gxp_dma_flags);
+ if (ret) {
+ dma_free_coherent(gxp->dev, size, buf, daddr);
+ return NULL;
+ }
+ }
+
+ if (dma_handle)
+ *dma_handle = daddr;
+
+ return buf;
}
-void gxp_dma_free_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
+void gxp_dma_unmap_allocated_coherent_buffer(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list, size_t size,
+ dma_addr_t dma_handle)
{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- int core;
+ int virt_core;
size = size < PAGE_SIZE ? PAGE_SIZE : size;
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (!(core_list & BIT(core)))
+ for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+ if (!(virt_core_list & BIT(virt_core)))
continue;
-
if (size !=
- iommu_unmap(mgr->core_domains[core], dma_handle, size))
+ iommu_unmap(vd->core_domains[virt_core], dma_handle, size))
dev_warn(gxp->dev, "Failed to unmap coherent buffer\n");
}
+}
+void gxp_dma_free_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle)
+{
+ if (vd != NULL)
+ gxp_dma_unmap_allocated_coherent_buffer(gxp, vd, virt_core_list,
+ size, dma_handle);
dma_free_coherent(gxp->dev, size, cpu_addr, dma_handle);
}
-dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp, uint core_list,
- void *cpu_addr, size_t size,
+dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list, void *cpu_addr, size_t size,
enum dma_data_direction direction,
unsigned long attrs, uint gxp_dma_flags)
{
@@ -629,7 +640,7 @@ dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp, uint core_list,
dma_addr_t daddr;
phys_addr_t paddr;
int prot = dma_info_to_prot(direction, 0, attrs);
- int core;
+ int virt_core;
daddr = dma_map_single_attrs(gxp->dev, cpu_addr, size, direction,
attrs);
@@ -637,11 +648,10 @@ dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp, uint core_list,
return DMA_MAPPING_ERROR;
paddr = iommu_iova_to_phys(mgr->default_domain, daddr);
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (!(core_list & BIT(core)))
+ for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+ if (!(virt_core_list & BIT(virt_core)))
continue;
-
- if (iommu_map(mgr->core_domains[core], daddr, paddr, size,
+ if (iommu_map(vd->core_domains[virt_core], daddr, paddr, size,
prot))
goto err;
}
@@ -649,36 +659,35 @@ dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp, uint core_list,
return daddr;
err:
- for (core -= 1; core >= 0; core--)
- iommu_unmap(mgr->core_domains[core], daddr, size);
+ for (virt_core -= 1; virt_core >= 0; virt_core--)
+ iommu_unmap(vd->core_domains[virt_core], daddr, size);
dma_unmap_single_attrs(gxp->dev, daddr, size, direction,
DMA_ATTR_SKIP_CPU_SYNC);
return DMA_MAPPING_ERROR;
}
-void gxp_dma_unmap_single(struct gxp_dev *gxp, uint core_list,
- dma_addr_t dma_addr, size_t size,
+void gxp_dma_unmap_single(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction,
unsigned long attrs)
{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- int core;
+ int virt_core;
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (!(core_list & BIT(core)))
+ for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+ if (!(virt_core_list & BIT(virt_core)))
continue;
if (size !=
- iommu_unmap(mgr->core_domains[core], dma_addr, size))
+ iommu_unmap(vd->core_domains[virt_core], dma_addr, size))
dev_warn(gxp->dev, "Failed to unmap single\n");
}
dma_unmap_single_attrs(gxp->dev, dma_addr, size, direction, attrs);
}
-dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, uint core_list,
- struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction,
+dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction,
unsigned long attrs, uint gxp_dma_flags)
{
struct gxp_dma_iommu_manager *mgr = container_of(
@@ -686,7 +695,7 @@ dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, uint core_list,
dma_addr_t daddr;
phys_addr_t paddr;
int prot = dma_info_to_prot(direction, 0, attrs);
- int core;
+ int virt_core;
daddr = dma_map_page_attrs(gxp->dev, page, offset, size, direction,
attrs);
@@ -694,11 +703,10 @@ dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, uint core_list,
return DMA_MAPPING_ERROR;
paddr = iommu_iova_to_phys(mgr->default_domain, daddr);
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (!(core_list & BIT(core)))
+ for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+ if (!(virt_core_list & BIT(virt_core)))
continue;
-
- if (iommu_map(mgr->core_domains[core], daddr, paddr, size,
+ if (iommu_map(vd->core_domains[virt_core], daddr, paddr, size,
prot))
goto err;
}
@@ -706,96 +714,89 @@ dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, uint core_list,
return daddr;
err:
- for (core -= 1; core >= 0; core--)
- iommu_unmap(mgr->core_domains[core], daddr, size);
+ for (virt_core -= 1; virt_core >= 0; virt_core--)
+ iommu_unmap(vd->core_domains[virt_core], daddr, size);
dma_unmap_page_attrs(gxp->dev, daddr, size, direction,
DMA_ATTR_SKIP_CPU_SYNC);
return DMA_MAPPING_ERROR;
}
-void gxp_dma_unmap_page(struct gxp_dev *gxp, uint core_list,
- dma_addr_t dma_addr, size_t size,
+void gxp_dma_unmap_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction, unsigned long attrs)
{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- int core;
+ int virt_core;
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (!(core_list & BIT(core)))
+ for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+ if (!(virt_core_list & BIT(virt_core)))
continue;
if (size !=
- iommu_unmap(mgr->core_domains[core], dma_addr, size))
+ iommu_unmap(vd->core_domains[virt_core], dma_addr, size))
dev_warn(gxp->dev, "Failed to unmap page\n");
}
dma_unmap_page_attrs(gxp->dev, dma_addr, size, direction, attrs);
}
-dma_addr_t gxp_dma_map_resource(struct gxp_dev *gxp, uint core_list,
- phys_addr_t phys_addr, size_t size,
- enum dma_data_direction direction,
+dma_addr_t gxp_dma_map_resource(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list, phys_addr_t phys_addr,
+ size_t size, enum dma_data_direction direction,
unsigned long attrs, uint gxp_dma_flags)
{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
dma_addr_t daddr;
int prot = dma_info_to_prot(direction, 0, attrs);
- int core;
+ int virt_core;
daddr = dma_map_resource(gxp->dev, phys_addr, size, direction, attrs);
if (dma_mapping_error(gxp->dev, daddr))
return DMA_MAPPING_ERROR;
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (!(core_list & BIT(core)))
+ for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+ if (!(virt_core_list & BIT(virt_core)))
continue;
-
- if (iommu_map(mgr->core_domains[core], daddr, phys_addr, size,
- prot))
+ if (iommu_map(vd->core_domains[virt_core], daddr, phys_addr,
+ size, prot))
goto err;
}
return daddr;
err:
- for (core -= 1; core >= 0; core--)
- iommu_unmap(mgr->core_domains[core], daddr, size);
+ for (virt_core -= 1; virt_core >= 0; virt_core--)
+ iommu_unmap(vd->core_domains[virt_core], daddr, size);
dma_unmap_resource(gxp->dev, daddr, size, direction,
DMA_ATTR_SKIP_CPU_SYNC);
return DMA_MAPPING_ERROR;
}
-void gxp_dma_unmap_resource(struct gxp_dev *gxp, uint core_list,
- dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction,
+void gxp_dma_unmap_resource(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction,
unsigned long attrs)
{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
- int core;
+ int virt_core;
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (!(core_list & BIT(core)))
+ for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+ if (!(virt_core_list & BIT(virt_core)))
continue;
if (size !=
- iommu_unmap(mgr->core_domains[core], dma_addr, size))
+ iommu_unmap(vd->core_domains[virt_core], dma_addr, size))
dev_warn(gxp->dev, "Failed to unmap resource\n");
}
dma_unmap_resource(gxp->dev, dma_addr, size, direction, attrs);
}
-int gxp_dma_map_sg(struct gxp_dev *gxp, uint core_list, struct scatterlist *sg,
- int nents, enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags)
+int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ int virt_core_list, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction, unsigned long attrs,
+ uint gxp_dma_flags)
{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
int nents_mapped;
dma_addr_t daddr;
int prot = dma_info_to_prot(direction, 0, attrs);
- int core;
+ int virt_core;
ssize_t size_mapped;
/* Variables needed to cleanup if an error occurs */
struct scatterlist *s;
@@ -808,16 +809,15 @@ int gxp_dma_map_sg(struct gxp_dev *gxp, uint core_list, struct scatterlist *sg,
daddr = sg_dma_address(sg);
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (!(core_list & BIT(core)))
+ for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+ if (!(virt_core_list & BIT(virt_core)))
continue;
-
/*
* In Linux 5.15 and beyond, `iommu_map_sg()` returns a
* `ssize_t` to encode errors that earlier versions throw out.
* Explicitly cast here for backwards compatibility.
*/
- size_mapped = (ssize_t)iommu_map_sg(mgr->core_domains[core],
+ size_mapped = (ssize_t)iommu_map_sg(vd->core_domains[virt_core],
daddr, sg, nents, prot);
if (size_mapped <= 0)
goto err;
@@ -830,31 +830,29 @@ err:
size += sg_dma_len(s);
}
- for (core -= 1; core >= 0; core--)
- iommu_unmap(mgr->core_domains[core], daddr, size);
+ for (virt_core -= 1; virt_core >= 0; virt_core--)
+ iommu_unmap(vd->core_domains[virt_core], daddr, size);
dma_unmap_sg_attrs(gxp->dev, sg, nents, direction, attrs);
return 0;
}
-void gxp_dma_unmap_sg(struct gxp_dev *gxp, uint core_list,
- struct scatterlist *sg, int nents,
+void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, struct scatterlist *sg, int nents,
enum dma_data_direction direction, unsigned long attrs)
{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
struct scatterlist *s;
int i;
size_t size = 0;
- int core;
+ int virt_core;
for_each_sg(sg, s, nents, i) {
size += sg_dma_len(s);
}
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (!(core_list & BIT(core)))
+ for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+ if (!(virt_core_list & BIT(virt_core)))
continue;
- if (!iommu_unmap(mgr->core_domains[core], sg_dma_address(sg),
+ if (!iommu_unmap(vd->core_domains[virt_core], sg_dma_address(sg),
size))
dev_warn(gxp->dev, "Failed to unmap sg\n");
}
@@ -892,51 +890,15 @@ void gxp_dma_sync_sg_for_device(struct gxp_dev *gxp, struct scatterlist *sg,
dma_sync_sg_for_device(gxp->dev, sg, nents, direction);
}
-#ifdef CONFIG_GXP_TEST
-/*
- * gxp-dma-iommu.h interface
- * These APIs expose gxp-dma-iommu implementation details for unit testing.
- * They are not meant to be used by other components fo the driver.
- */
-
-struct iommu_domain *gxp_dma_iommu_get_default_domain(struct gxp_dev *gxp)
-{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
-
- if (!mgr)
- return ERR_PTR(-ENODEV);
-
- return mgr->default_domain;
-}
-
-struct iommu_domain *gxp_dma_iommu_get_core_domain(struct gxp_dev *gxp,
- uint core)
+struct sg_table *gxp_dma_map_dmabuf_attachment(
+ struct gxp_dev *gxp, struct gxp_virtual_device *vd, uint virt_core_list,
+ struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
-
- if (!mgr)
- return ERR_PTR(-ENODEV);
-
- if (core >= GXP_NUM_CORES)
- return ERR_PTR(-EINVAL);
-
- return mgr->core_domains[core];
-}
-#endif // CONFIG_GXP_TEST
-
-struct sg_table *
-gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
- struct dma_buf_attachment *attachment,
- enum dma_data_direction direction)
-{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
struct sg_table *sgt;
- int core;
int prot = dma_info_to_prot(direction, /*coherent=*/0, /*attrs=*/0);
ssize_t size_mapped;
+ int virt_core;
int ret;
/* Variables needed to cleanup if an error occurs */
struct scatterlist *s;
@@ -953,23 +915,22 @@ gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
}
/* Map the sgt into the aux domain of all specified cores */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (!(core_list & BIT(core)))
+ for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+ if (!(virt_core_list & BIT(virt_core)))
continue;
-
/*
* In Linux 5.15 and beyond, `iommu_map_sg()` returns a
* `ssize_t` to encode errors that earlier versions throw out.
* Explicitly cast here for backwards compatibility.
*/
size_mapped =
- (ssize_t)iommu_map_sg(mgr->core_domains[core],
+ (ssize_t)iommu_map_sg(vd->core_domains[virt_core],
sg_dma_address(sgt->sgl),
sgt->sgl, sgt->orig_nents, prot);
if (size_mapped <= 0) {
dev_err(gxp->dev,
- "Failed to map dma-buf to core %d (ret=%ld)\n",
- core, size_mapped);
+ "Failed to map dma-buf to virtual core %d (ret=%ld)\n",
+ virt_core, size_mapped);
/*
* Prior to Linux 5.15, `iommu_map_sg()` returns 0 for
* any failure. Return a generic IO error in this case.
@@ -985,42 +946,40 @@ err:
for_each_sg(sgt->sgl, s, sgt->nents, i)
size += sg_dma_len(s);
- for (core -= 1; core >= 0; core--)
- iommu_unmap(mgr->core_domains[core], sg_dma_address(sgt->sgl),
- size);
+ for (virt_core -= 1; virt_core >= 0; virt_core--)
+ iommu_unmap(vd->core_domains[virt_core], sg_dma_address(sgt->sgl), size);
dma_buf_unmap_attachment(attachment, sgt, direction);
return ERR_PTR(ret);
}
-void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
+void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list,
struct dma_buf_attachment *attachment,
struct sg_table *sgt,
enum dma_data_direction direction)
{
- struct gxp_dma_iommu_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
struct scatterlist *s;
int i;
size_t size = 0;
- int core;
+ int virt_core;
/* Find the size of the mapping in IOVA-space */
for_each_sg(sgt->sgl, s, sgt->nents, i)
size += sg_dma_len(s);
/* Unmap the dma-buf from the aux domain of all specified cores */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (!(core_list & BIT(core)))
+ for (virt_core = 0; virt_core < vd->num_cores; virt_core++) {
+ if (!(virt_core_list & BIT(virt_core)))
continue;
-
- if (!iommu_unmap(mgr->core_domains[core],
+ if (!iommu_unmap(vd->core_domains[virt_core],
sg_dma_address(sgt->sgl), size))
dev_warn(
gxp->dev,
- "Failed to unmap dma-buf from core %d\n",
- core);
+ "Failed to unmap dma-buf from virtual core %d\n",
+ virt_core);
}
/* Unmap the attachment from the default domain */
diff --git a/gxp-dma-iommu.h b/gxp-dma-iommu.h
deleted file mode 100644
index 971c45f..0000000
--- a/gxp-dma-iommu.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * GXP DMA IOMMU-specific interface.
- *
- * Copyright (C) 2021 Google LLC
- */
-#ifndef __GXP_DMA_IOMMU_H__
-#define __GXP_DMA_IOMMU_H__
-
-#include <linux/iommu.h>
-
-#include "gxp-internal.h"
-
-#ifdef CONFIG_GXP_TEST
-struct iommu_domain *gxp_dma_iommu_get_default_domain(struct gxp_dev *gxp);
-struct iommu_domain *gxp_dma_iommu_get_core_domain(struct gxp_dev *gxp,
- uint core);
-#endif
-
-#endif /* __GXP_DMA_IOMMU_H__ */
diff --git a/gxp-dma-rmem.c b/gxp-dma-rmem.c
deleted file mode 100644
index 578735e..0000000
--- a/gxp-dma-rmem.c
+++ /dev/null
@@ -1,615 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GXP DMA implemented via reserved memory carveouts.
- *
- * Copyright (C) 2021 Google LLC
- */
-
-#include <linux/genalloc.h>
-#include <linux/highmem.h>
-#include <linux/mm_types.h>
-#include <linux/mutex.h>
-#include <linux/rbtree.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-
-#include "gxp-config.h"
-#include "gxp-dma.h"
-#include "gxp-internal.h"
-#include "gxp-mapping.h"
-
-struct gxp_dma_bounce_buffer {
- struct rb_node node;
- dma_addr_t dma_handle;
- struct page *page;
- size_t size;
- unsigned long offset;
- void *buf;
-};
-
-struct gxp_dma_rmem_manager {
- struct gxp_dma_manager dma_mgr;
- struct gxp_mapped_resource poolbuf;
- struct gen_pool *pool;
- struct rb_root bounce_buffers;
- struct mutex bounce_lock;
-};
-
-/* RB Tree Management Functions for the Bounce Buffer tree */
-
-static int bounce_buffer_put(struct gxp_dma_rmem_manager *mgr,
- struct gxp_dma_bounce_buffer *bounce)
-{
- struct rb_node **link;
- struct rb_node *parent = NULL;
- struct gxp_dma_bounce_buffer *this;
-
- link = &mgr->bounce_buffers.rb_node;
-
- mutex_lock(&mgr->bounce_lock);
-
- while (*link) {
- parent = *link;
- this = rb_entry(parent, struct gxp_dma_bounce_buffer, node);
-
- if (this->dma_handle > bounce->dma_handle)
- link = &(*link)->rb_left;
- else if (this->dma_handle < bounce->dma_handle)
- link = &(*link)->rb_right;
- else
- goto out;
- }
-
- rb_link_node(&bounce->node, parent, link);
- rb_insert_color(&bounce->node, &mgr->bounce_buffers);
-
- mutex_unlock(&mgr->bounce_lock);
-
- return 0;
-
-out:
- mutex_unlock(&mgr->bounce_lock);
- return -EINVAL;
-}
-
-static struct gxp_dma_bounce_buffer *
-bounce_buffer_get(struct gxp_dma_rmem_manager *mgr, dma_addr_t dma_handle)
-{
- struct rb_node *node;
- struct gxp_dma_bounce_buffer *this;
-
- mutex_lock(&mgr->bounce_lock);
-
- node = mgr->bounce_buffers.rb_node;
-
- while (node) {
- this = rb_entry(node, struct gxp_dma_bounce_buffer, node);
-
- if (this->dma_handle > dma_handle) {
- node = node->rb_left;
- } else if (this->dma_handle + this->size <= dma_handle) {
- node = node->rb_right;
- } else {
- mutex_unlock(&mgr->bounce_lock);
- return this;
- }
- }
-
- mutex_unlock(&mgr->bounce_lock);
-
- return NULL;
-}
-
-static void bounce_buffer_remove(struct gxp_dma_rmem_manager *mgr,
- struct gxp_dma_bounce_buffer *bounce)
-{
- rb_erase(&bounce->node, &mgr->bounce_buffers);
-}
-
-/* gxp-dma.h Interface */
-
-int gxp_dma_ssmt_program(struct gxp_dev *gxp)
-{
- /* NO-OP when using reserved memory with no IOMMU */
- return 0;
-}
-
-int gxp_dma_init(struct gxp_dev *gxp)
-{
- struct gxp_dma_rmem_manager *mgr;
- struct resource r;
- int ret;
-
- mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
- return -ENOMEM;
-
- /* Map the reserved memory for the pool from the device tree */
- if (gxp_acquire_rmem_resource(gxp, &r, "gxp-pool-region")) {
- dev_err(gxp->dev, "Unable to acquire pool reserved memory\n");
- return -ENODEV;
- }
-
- mgr->poolbuf.paddr = r.start;
- mgr->poolbuf.size = resource_size(&r);
- mgr->poolbuf.vaddr = devm_memremap(gxp->dev, mgr->poolbuf.paddr,
- mgr->poolbuf.size, MEMREMAP_WC);
- if (IS_ERR_OR_NULL(mgr->poolbuf.vaddr)) {
- dev_err(gxp->dev, "Failed to map pool\n");
- return -ENODEV;
- }
-
- /* Create the gen pool for mappings/coherent allocations */
- mgr->pool = devm_gen_pool_create(gxp->dev, PAGE_SHIFT, -1, "gxp-pool");
- if (!mgr->pool) {
- dev_err(gxp->dev, "Failed to create memory pool\n");
- return -ENOMEM;
- }
-
- ret = gen_pool_add_virt(mgr->pool, (unsigned long)mgr->poolbuf.vaddr,
- mgr->poolbuf.paddr, mgr->poolbuf.size, -1);
- if (ret) {
- dev_err(gxp->dev, "Failed to add memory to pool (ret = %d)\n",
- ret);
- return ret;
- }
-
- mgr->dma_mgr.mapping_tree = RB_ROOT;
- mgr->bounce_buffers = RB_ROOT;
-
- gxp->dma_mgr = &(mgr->dma_mgr);
-
- return 0;
-}
-
-void gxp_dma_exit(struct gxp_dev *gxp)
-{
- /* no cleanup */
-}
-
-int gxp_dma_map_resources(struct gxp_dev *gxp)
-{
- unsigned int core;
-
- /* all resources are accessed via PA if there's no iommu */
- for (core = 0; core < GXP_NUM_CORES; core++) {
- gxp->mbx[core].daddr = gxp->mbx[core].paddr;
- gxp->fwbufs[core].daddr = gxp->fwbufs[core].paddr;
- }
- gxp->regs.daddr = gxp->regs.paddr;
- gxp->coredumpbuf.daddr = gxp->coredumpbuf.paddr;
-
- return 0;
-}
-
-void gxp_dma_unmap_resources(struct gxp_dev *gxp)
-{
- /* no mappings to undo */
-}
-
-#if IS_ENABLED(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_GXP_GEM5)
-int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, uint core_list,
- struct edgetpu_ext_mailbox_info *mbx_info)
-{
- return -ENODEV;
-}
-
-void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
- struct gxp_tpu_mbx_desc mbx_desc)
-{
- /* no mappings to undo */
-}
-#endif // CONFIG_ANDROID && !CONFIG_GXP_GEM5
-
-void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- uint gxp_dma_flags)
-{
- /* Allocate the buffer from the cache-coherent pool */
- struct gxp_dma_rmem_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_rmem_manager, dma_mgr);
- void *vaddr = (void *)gen_pool_alloc(mgr->pool, size);
-
- if (!vaddr) {
- dev_err(gxp->dev, "Unable to allocate coherent buffer\n");
- return NULL;
- }
-
- /*
- * On SysMMU-less systems, all GXP cores access DRAM directly, so set
- * the dma_handle to the buffer's physical address.
- */
- if (dma_handle) {
- *dma_handle =
- gen_pool_virt_to_phys(mgr->pool, (unsigned long)vaddr);
-
- if (*dma_handle == -1) {
- dev_err(gxp->dev,
- "Unable to get dma_addr_t for coherent buffer\n");
- gen_pool_free(mgr->pool, (unsigned long)vaddr, size);
- return NULL;
- }
- }
-
- /* `core_list` is unused, since no SysMMU means there's no mappings */
- return vaddr;
-}
-
-void gxp_dma_free_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
- void *cpu_addr, dma_addr_t dma_handle)
-{
- /* No unmapping required since there's no SysMMU */
-
- /* Clean up the buffer */
- struct gxp_dma_rmem_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_rmem_manager, dma_mgr);
- gen_pool_free(mgr->pool, (unsigned long)cpu_addr, size);
-}
-
-dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp, uint core_list,
- void *cpu_addr, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags)
-{
- return gxp_dma_map_page(gxp, core_list, virt_to_page(cpu_addr),
- offset_in_page(cpu_addr), size, direction,
- attrs, gxp_dma_flags);
-}
-
-void gxp_dma_unmap_single(struct gxp_dev *gxp, uint core_list,
- dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- return gxp_dma_unmap_page(gxp, core_list, dma_addr, size, direction,
- attrs);
-}
-
-dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, uint core_list,
- struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags)
-{
- struct gxp_dma_rmem_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_rmem_manager, dma_mgr);
- struct gxp_dma_bounce_buffer *bounce;
- void *page_buf;
- int ret;
-
- bounce = kzalloc(sizeof(struct gxp_dma_bounce_buffer), GFP_KERNEL);
- if (!bounce) {
- dev_err(gxp->dev,
- "Failed to allocate tracking struct for mapping page\n");
- return DMA_MAPPING_ERROR;
- }
-
- bounce->offset = offset;
- bounce->size = size;
- bounce->page = page;
-
- bounce->buf = (void *)gen_pool_alloc(mgr->pool, size);
- if (!bounce->buf) {
- dev_err(gxp->dev,
- "Failed to allocate bounce buffer for mapping page\n");
- goto pool_alloc_error;
- }
-
- bounce->dma_handle =
- gen_pool_virt_to_phys(mgr->pool, (unsigned long)bounce->buf);
- if (bounce->dma_handle == -1) {
- dev_err(gxp->dev, "Unable to get dma_addr_t for mapped page\n");
- goto error;
- }
-
- page_buf = kmap(page);
- if (!page_buf) {
- dev_err(gxp->dev,
- "Failed to map page for copying to bounce buffer\n");
- goto error;
- }
- memcpy(bounce->buf, page_buf + offset, size);
- kunmap(page);
-
- ret = bounce_buffer_put(mgr, bounce);
- if (ret) {
- dev_err(gxp->dev,
- "Unable to put bounce buffer!\n");
- goto error;
- }
-
- return bounce->dma_handle;
-
-error:
- gen_pool_free(mgr->pool, (unsigned long)bounce->buf, bounce->size);
-pool_alloc_error:
- kfree(bounce);
- return DMA_MAPPING_ERROR;
-}
-
-void gxp_dma_unmap_page(struct gxp_dev *gxp, uint core_list,
- dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction, unsigned long attrs)
-{
- struct gxp_dma_rmem_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_rmem_manager, dma_mgr);
- struct gxp_dma_bounce_buffer *bounce =
- bounce_buffer_get(mgr, dma_addr);
- void *page_buf = NULL;
-
- if (!bounce || !bounce->page) {
- dev_err(gxp->dev, "No page to unmap for IOVA %pad\n",
- &dma_addr);
- return;
- }
-
- bounce_buffer_remove(mgr, bounce);
-
- page_buf = kmap(bounce->page);
- if (!page_buf) {
- dev_warn(
- gxp->dev,
- "Failed to map page for copying from bounce buffer on unmap\n");
- } else {
- memcpy(page_buf + bounce->offset, bounce->buf, bounce->size);
- }
- kunmap(bounce->page);
-
- gen_pool_free(mgr->pool, (unsigned long)bounce->buf, bounce->size);
- kfree(bounce);
-}
-
-dma_addr_t gxp_dma_map_resource(struct gxp_dev *gxp, uint core_list,
- phys_addr_t phys_addr, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags)
-{
- dev_warn(gxp->dev, "%s: not yet supported!\n", __func__);
- return 0;
-}
-
-void gxp_dma_unmap_resource(struct gxp_dev *gxp, uint core_list,
- dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction,
- unsigned long attrs)
-{
- dev_warn(gxp->dev, "%s: not yet supported!\n", __func__);
-}
-
-int gxp_dma_map_sg(struct gxp_dev *gxp, uint core_list, struct scatterlist *sg,
- int nents, enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags)
-{
- struct gxp_dma_rmem_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_rmem_manager, dma_mgr);
- struct gxp_dma_bounce_buffer *bounce;
- struct scatterlist *s;
- size_t size_so_far = 0;
- int i;
-
- bounce = kzalloc(sizeof(struct gxp_dma_bounce_buffer), GFP_KERNEL);
- if (!bounce) {
- dev_err(gxp->dev,
- "Failed to allocate tracking struct for mapping sg\n");
- return 0;
- }
-
- for_each_sg(sg, s, nents, i)
- bounce->size += s->length;
-
- bounce->buf = (void *)gen_pool_alloc(mgr->pool, bounce->size);
- if (!bounce->buf) {
- dev_err(gxp->dev,
- "Failed to allocate bounce buffer for mapping sg\n");
- goto pool_alloc_error;
- }
-
- sg_copy_to_buffer(sg, nents, bounce->buf, bounce->size);
-
- for_each_sg(sg, s, nents, i) {
- s->dma_length = s->length;
- s->dma_address = gen_pool_virt_to_phys(
- mgr->pool, (unsigned long)bounce->buf) + size_so_far;
- size_so_far += s->length;
-
- if (s->dma_address == -1) {
- dev_err(gxp->dev,
- "Failed to get dma_addr_t while mapping sg\n");
- goto error;
- }
- }
-
- bounce->dma_handle = sg->dma_address;
- /* SGs use the SG's internal page and offset values */
- bounce->page = NULL;
- bounce->offset = 0;
-
- if (bounce_buffer_put(mgr, bounce)) {
- dev_err(gxp->dev, "Unable to put bounce buffer for sg!\n");
- goto error;
- }
-
- return nents;
-
-error:
- /* TODO is this necessary? */
- for_each_sg(sg, s, nents, i) {
- s->dma_length = 0;
- s->dma_address = 0;
- }
-
- gen_pool_free(mgr->pool, (unsigned long) bounce->buf, bounce->size);
-
-pool_alloc_error:
- kfree(bounce);
-
- return 0;
-}
-
-void gxp_dma_unmap_sg(struct gxp_dev *gxp, uint core_list,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction, unsigned long attrs)
-{
- struct gxp_dma_rmem_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_rmem_manager, dma_mgr);
- struct gxp_dma_bounce_buffer *bounce =
- bounce_buffer_get(mgr, sg->dma_address);
- struct scatterlist *s;
- int i;
-
- if (!bounce || bounce->page) {
- dev_err(gxp->dev, "No sg to unmap for IOVA %pad\n",
- &sg->dma_address);
- return;
- }
-
- if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- gxp_dma_sync_sg_for_cpu(gxp, sg, nents, direction);
-
- bounce_buffer_remove(mgr, bounce);
-
- /* TODO is this necessary? */
- for_each_sg(sg, s, nents, i) {
- s->dma_length = 0;
- s->dma_address = 0;
- }
-
- gen_pool_free(mgr->pool, (unsigned long)bounce->buf, bounce->size);
- kfree(bounce);
-}
-
-void gxp_dma_sync_single_for_cpu(struct gxp_dev *gxp, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
-{
- struct gxp_dma_rmem_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_rmem_manager, dma_mgr);
- struct gxp_dma_bounce_buffer *bounce =
- bounce_buffer_get(mgr, dma_handle);
- void *page_buf = NULL;
- unsigned long addr_offset;
-
- if (!bounce || !bounce->page) {
- dev_err(gxp->dev, "No single mapping to sync for IOVA %pad\n",
- &dma_handle);
- return;
- }
-
- addr_offset = dma_handle - bounce->dma_handle;
-
- /* Copy the contents of the bounce buffer back to the mapped page */
- page_buf = kmap(bounce->page);
- if (!page_buf) {
- dev_warn(gxp->dev,
- "Failed to map page for syncing from bounce buffer\n");
- return;
- }
- memcpy(page_buf + bounce->offset + addr_offset,
- bounce->buf + addr_offset, bounce->size);
- kunmap(bounce->page);
-}
-
-void gxp_dma_sync_single_for_device(struct gxp_dev *gxp, dma_addr_t dma_handle,
- size_t size,
- enum dma_data_direction direction)
-{
- struct gxp_dma_rmem_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_rmem_manager, dma_mgr);
- struct gxp_dma_bounce_buffer *bounce =
- bounce_buffer_get(mgr, dma_handle);
- void *page_buf = NULL;
- unsigned long addr_offset;
-
- if (!bounce || !bounce->page) {
- dev_err(gxp->dev, "No single mapping to sync for IOVA %pad\n",
- &dma_handle);
- return;
- }
-
- addr_offset = dma_handle - bounce->dma_handle;
-
- /* Copy the latest contents of the mapped page to the bounce buffer*/
- page_buf = kmap(bounce->page);
- if (!page_buf) {
- dev_warn(gxp->dev,
- "Failed to map page for syncing to bounce buffer\n");
- return;
- }
- memcpy(bounce->buf + addr_offset,
- page_buf + bounce->offset + addr_offset, bounce->size);
- kunmap(bounce->page);
-}
-
-void gxp_dma_sync_sg_for_cpu(struct gxp_dev *gxp, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
- struct gxp_dma_rmem_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_rmem_manager, dma_mgr);
- struct gxp_dma_bounce_buffer *bounce =
- bounce_buffer_get(mgr, sg->dma_address);
- void *page_buf;
- struct scatterlist *s;
- unsigned int i;
-
- if (!bounce || bounce->page) {
- dev_err(gxp->dev, "No mapping to sync for sg\n");
- return;
- }
-
- for_each_sg(sg, s, nents, i) {
- page_buf = kmap(sg_page(s));
- if (!page_buf) {
- dev_warn(gxp->dev, "Failed to map page for sg sync\n");
- continue;
- }
- memcpy(page_buf + s->offset,
- bounce->buf + (s->dma_address - bounce->dma_handle),
- s->dma_length);
- kunmap(sg_page(s));
- }
-}
-
-void gxp_dma_sync_sg_for_device(struct gxp_dev *gxp, struct scatterlist *sg,
- int nents, enum dma_data_direction direction)
-{
- struct gxp_dma_rmem_manager *mgr = container_of(
- gxp->dma_mgr, struct gxp_dma_rmem_manager, dma_mgr);
- struct gxp_dma_bounce_buffer *bounce =
- bounce_buffer_get(mgr, sg->dma_address);
- void *page_buf;
- struct scatterlist *s;
- unsigned int i;
-
- if (!bounce || bounce->page) {
- dev_err(gxp->dev, "No mapping to sync for sg\n");
- return;
- }
-
- for_each_sg(sg, s, nents, i) {
- page_buf = kmap(sg_page(s));
- if (!page_buf) {
- dev_warn(gxp->dev, "Failed to map page for sg sync\n");
- continue;
- }
- memcpy(bounce->buf + (s->dma_address - bounce->dma_handle),
- page_buf + s->offset, s->dma_length);
- kunmap(sg_page(s));
- }
-}
-
-struct sg_table *
-gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
- struct dma_buf_attachment *attachment,
- enum dma_data_direction direction)
-{
- dev_warn(gxp->dev, "%s not supported on systems without an IOMMU\n",
- __func__);
- return ERR_PTR(-ENOSYS);
-}
-
-void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
- struct dma_buf_attachment *attachment,
- struct sg_table *sgt,
- enum dma_data_direction direction)
-{
- dev_warn(gxp->dev, "%s not supported on systems without an IOMMU\n",
- __func__);
-}
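The deleted reserved-memory backend tracked its bounce buffers in an rb-tree keyed by DMA handle, and looked entries up by checking whether an address falls inside a node's [dma_handle, dma_handle + size) interval. A stripped-down sketch of that range lookup, with illustrative struct and function names:

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct bounce_node {
            struct rb_node node;
            dma_addr_t dma_handle;
            size_t size;
    };

    /* Sketch: find the node whose [dma_handle, dma_handle + size) interval
     * contains @addr, mirroring the removed bounce_buffer_get() logic. */
    static struct bounce_node *bounce_lookup(struct rb_root *root, dma_addr_t addr)
    {
            struct rb_node *node = root->rb_node;

            while (node) {
                    struct bounce_node *this =
                            rb_entry(node, struct bounce_node, node);

                    if (this->dma_handle > addr)
                            node = node->rb_left;
                    else if (this->dma_handle + this->size <= addr)
                            node = node->rb_right;
                    else
                            return this;
            }
            return NULL;
    }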
diff --git a/gxp-dma.h b/gxp-dma.h
index 3d72f65..71c3c50 100644
--- a/gxp-dma.h
+++ b/gxp-dma.h
@@ -32,27 +32,6 @@ struct gxp_dma_manager {
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)
#endif
-/*
- * TODO(b/214113464) This is a temporary interface to reprogram the SSMT every
- * time the block is powered up. It should be replaced with a new interface for
- * assigning a given virtual device's domain to that virtual device's physical
- * core once VD suspend/resume is implemented.
- */
-/**
- * gxp_dma_ssmt_program() - Program the SSMTs to map each core to its page table.
- * @gxp: The GXP device to program the SSMTs for
- *
- * Every time the DSP block is powered on, the SSMTs must be re-programmed to
- * map traffic from each physical core to be translated via that core's
- * assigned page table. This API must be called every time the block is powered
- * on for DSP usage.
- *
- * Return:
- * * 0 - Success
- * * Other - Reserved
- */
-int gxp_dma_ssmt_program(struct gxp_dev *gxp);
-
/**
* gxp_dma_init() - Initialize the GXP DMA subsystem
* @gxp: The GXP device to initialize DMA for
@@ -72,88 +51,210 @@ int gxp_dma_init(struct gxp_dev *gxp);
void gxp_dma_exit(struct gxp_dev *gxp);
/**
- * gxp_dma_map_resources() - Map the various buffers/registers with fixed IOVAs
- * @gxp: The GXP device to setup the mappings for
+ * gxp_dma_domain_attach_device() - Attach the page table of a virtual core to
+ * the device and perform any necessary initialization.
+ * @gxp: The GXP device to attach
+ * @vd: The virtual device including the virtual core
+ * @virt_core: The virtual core the page table belongs to
+ * @core: The physical core bound to the virtual core
+ *
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
+ */
+int gxp_dma_domain_attach_device(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint virt_core,
+ uint core);
+
+/**
+ * gxp_dma_domain_detach_device() - Detach the page table of a virtual core from
+ * the device.
+ * @gxp: The GXP device to detach
+ * @vd: The virtual device including the virtual core
+ * @virt_core: The virtual core the page table belongs to
+ *
+ * The client that @vd belongs to must hold a BLOCK wakelock for the duration
+ * of the IOMMU detach.
+ *
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
+ */
+void gxp_dma_domain_detach_device(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint virt_core);
+
+/**
+ * gxp_dma_init_default_resources() - Set up the fixed IOVAs for the various
+ * buffers/registers
+ * @gxp: The GXP device to set up the IOVAs for
+ *
+ * GXP firmware expects several buffers and registers to be mapped to fixed
+ * locations in their IOVA space. This function sets up these fixed IOVAs.
+ */
+void gxp_dma_init_default_resources(struct gxp_dev *gxp);
+
+/**
+ * gxp_dma_map_core_resources() - Map the various buffers/registers with
+ * fixed IOVAs for a given virtual core
+ * @gxp: The GXP device to set up the mappings for
+ * @vd: The virtual device including the virtual core the IOVAs are mapped for
+ * @virt_core: The virtual core the IOVAs are mapped for
+ * @core: The corresponding physical core of the @virt_core
*
* GXP firmware expects several buffers and registers to be mapped to fixed
- * locations in their IOVA space. This function initializes all those mappings.
+ * locations in their IOVA space. This function initializes all those mappings
+ * for the core.
*
* This function must not be called until after all the `vaddr` and `size`
* fields of every `struct gxp_mapped_resource` inside of @gxp have been
* initialized.
*
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
+ *
* Return:
* * 0 - Mappings created successfully
* * -EIO - Failed to create one or more of the mappings
*/
-int gxp_dma_map_resources(struct gxp_dev *gxp);
+int gxp_dma_map_core_resources(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint virt_core,
+ uint core);
/**
- * gxp_dma_unmap_resources() - Unmap the IOVAs mapped by gxp_dma_map_resources
- * @gxp: The GXP device that was passed to gxp_dma_map_resources()
+ * gxp_dma_unmap_core_resources() - Unmap the IOVAs mapped by
+ * gxp_dma_map_core_resources()
+ * @gxp: The GXP device that was passed to gxp_dma_map_core_resources()
+ * @vd: The virtual device including the virtual core the IOVAs were mapped for
+ * @virt_core: The virtual core the IOVAs were mapped for
+ * @core: The physical core the IOVAs were mapped for
*
* GXP firmware expects several buffers and registers to be mapped to fixed
* locations in their IOVA space. This function releases all those mappings.
*
- * This function should be called after gxp_dma_map_resources().
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
*/
-void gxp_dma_unmap_resources(struct gxp_dev *gxp);
+void gxp_dma_unmap_core_resources(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint virt_core,
+ uint core);
#if IS_ENABLED(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_GXP_GEM5)
/**
* gxp_dma_map_tpu_buffer() - Map the tpu mbx queue buffers with fixed IOVAs
- * @gxp: The GXP device to setup the mappings for
+ * @gxp: The GXP device to set up the mappings for
+ * @vd: The virtual device including the virtual cores the mapping is for
+ * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
* @core_list: A bitfield enumerating the physical cores the mapping is for
* @mbx_info: Structure holding TPU-DSP mailbox queue buffer information
*
* Return:
* * 0 - Mappings created successfully
* * -EIO - Failed to create the mappings
+ *
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
*/
-int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, uint core_list,
+int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, uint core_list,
struct edgetpu_ext_mailbox_info *mbx_info);
/**
* gxp_dma_unmap_tpu_buffer() - Unmap IOVAs mapped by gxp_dma_map_tpu_buffer()
* @gxp: The GXP device that was passed to gxp_dma_map_tpu_buffer()
- * @mbx_desc: Structure holding info for already mapped TPU-DSP mailboxes.
+ * @vd: The virtual device including the virtual cores the mapping was for
+ * @mbx_desc: Structure holding info for already mapped TPU-DSP mailboxes. The
+ * list of virtual cores to unmap is in this descriptor.
+ *
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
*/
void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
struct gxp_tpu_mbx_desc mbx_desc);
#endif // CONFIG_ANDROID && !CONFIG_GXP_GEM5
/**
+ * gxp_dma_map_allocated_coherent_buffer() - Map a coherent buffer
+ * @gxp: The GXP device to map the allocated buffer for
+ * @buf: The kernel virtual address of the allocated buffer to map
+ * @vd: The virtual device including the virtual cores the mapping is for
+ * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
+ * @size: The size of the allocated buffer, in bytes
+ * @dma_handle: The allocated device IOVA
+ * @gxp_dma_flags: The type of mapping to create; currently unused
+ *
+ * Return: 0 on success, otherwise an error code
+ *
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
+ */
+int gxp_dma_map_allocated_coherent_buffer(struct gxp_dev *gxp, void *buf,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list, size_t size,
+ dma_addr_t dma_handle,
+ uint gxp_dma_flags);
+/**
+ * gxp_dma_unmap_allocated_coherent_buffer() - Unmap a coherent buffer
+ * @gxp: The GXP device the buffer was allocated and mapped for
+ * @vd: The virtual device including the virtual cores the mapping was for
+ * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
+ * @size: The size of the buffer, in bytes
+ * @dma_handle: The device IOVA
+ *
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
+ */
+void gxp_dma_unmap_allocated_coherent_buffer(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list, size_t size,
+ dma_addr_t dma_handle);
+/**
* gxp_dma_alloc_coherent() - Allocate and map a coherent buffer for a GXP core
* @gxp: The GXP device to map the allocated buffer for
- * @core_list: A bitfield enumerating the physical cores the mapping is for
+ * @vd: The virtual device including the virtual cores the mapping is for
+ * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
* @size: The size of the buffer to be allocated, in bytes
* @dma_handle: Reference to a variable to be set to the allocated IOVA
* @flag: The type of memory to allocate (see kmalloc)
* @gxp_dma_flags: The type of mapping to create; Currently unused
*
* Return: Kernel virtual address of the allocated/mapped buffer
+ *
+ * If the passed @vd is a null pointer, this function will only allocate a
+ * buffer but not map it to any particular core.
+ *
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
*/
-void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
+void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
uint gxp_dma_flags);
/**
* gxp_dma_free_coherent() - Unmap and free a coherent buffer
* @gxp: The GXP device the buffer was allocated and mapped for
- * @core_list: A bitfield enumerating the physical cores the mapping is for
- * @size: The size of the buffer, in bytes, passed to `gxp_dma_alloc()`
- * @cpu_addr: The kernel virtual address returned by `gxp_dma_alloc()`
- * @dma_handle: The device IOVA, set by `gxp_dma_alloc()`
+ * @vd: The virtual device including the virtual cores the mapping was for
+ * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
+ * @size: The size of the buffer, in bytes, passed to `gxp_dma_alloc_coherent()`
+ * @cpu_addr: The kernel virtual address returned by `gxp_dma_alloc_coherent()`
+ * @dma_handle: The device IOVA, set by `gxp_dma_alloc_coherent()`
+ *
+ * If the buffer is mapped via `gxp_dma_map_allocated_coherent_buffer`, the
+ * caller must call `gxp_dma_unmap_allocated_coherent_buffer` to unmap before
+ * freeing the buffer.
+ *
+ * If the passed @vd is a null pointer, this function will only free the buffer
+ * but not do any unmapping.
*
- * If the buffer has been mirror-mapped via `gxp_dma_mirror_map()`, the buffer
- * will not be freed until all mappings have been unmapped.
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
*/
-void gxp_dma_free_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
- void *cpu_addr, dma_addr_t dma_handle);
+void gxp_dma_free_coherent(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle);
/**
* gxp_dma_map_single() - Create a mapping for a kernel buffer
* @gxp: The GXP device to map the buffer for
- * @core_list: A bitfield enumerating the physical cores the mapping is for
+ * @vd: The virtual device including the virtual cores the mapping is for
+ * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
* @cpu_addr: The kernel virtual address of the buffer to map
* @size: The size of the buffer to map, in bytes
* @direction: DMA direction
@@ -161,29 +262,38 @@ void gxp_dma_free_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
* @gxp_dma_flags: The type of mapping to create; Currently unused
*
* Return: The IOVA the buffer was mapped to
+ *
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
*/
-dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp, uint core_list,
- void *cpu_addr, size_t size,
+dma_addr_t gxp_dma_map_single(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list, void *cpu_addr, size_t size,
enum dma_data_direction direction,
unsigned long attrs, uint gxp_dma_flags);
/**
* gxp_dma_unmap_single() - Unmap a kernel buffer
* @gxp: The GXP device the buffer was mapped for
- * @core_list: A bitfield enumerating the physical cores the mapping is for
+ * @vd: The virtual device including the virtual cores the mapping was for
+ * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
* @dma_addr: The device IOVA, returned by `gxp_dma_map_single()`
* @size: The size of the mapping, which was passed to `gxp_dma_map_single()`
* @direction: DMA direction; same as passed to `gxp_dma_map_single()`
* @attrs: The same set of flags used by the base DMA API
+ *
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
*/
-void gxp_dma_unmap_single(struct gxp_dev *gxp, uint core_list,
- dma_addr_t dma_addr, size_t size,
+void gxp_dma_unmap_single(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction,
unsigned long attrs);
/**
* gxp_dma_map_page() - Create a mapping for a physical page of memory
* @gxp: The GXP device to map the page for
- * @core_list: A bitfield enumerating the physical cores the mapping is for
+ * @vd: The virtual device including the virtual cores the mapping is for
+ * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
* @page: The `struct page` of the physical page to create a mapping for
* @offset: The offset into @page to begin the mapping at
* @size: The number of bytes in @page to map
@@ -192,28 +302,37 @@ void gxp_dma_unmap_single(struct gxp_dev *gxp, uint core_list,
* @gxp_dma_flags: The type of mapping to create; Currently unused
*
* Return: The IOVA the page was mapped to
+ *
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
*/
-dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, uint core_list,
- struct page *page, unsigned long offset,
- size_t size, enum dma_data_direction direction,
+dma_addr_t gxp_dma_map_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction,
unsigned long attrs, uint gxp_dma_flags);
/**
* gxp_dma_unmap_page() - Unmap a physical page of memory
* @gxp: The GXP device the page was mapped for
- * @core_list: A bitfield enumerating the physical cores the mapping is for
+ * @vd: The virtual device including the virtual cores the mapping was for
+ * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
* @dma_addr: The device IOVA, returned by `gxp_dma_map_page()`
* @size: The size of the mapping, which was passed to `gxp_dma_map_page()`
* @direction: DMA direction; Same as passed to `gxp_dma_map_page()`
* @attrs: The same set of flags used by the base DMA API
+ *
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
*/
-void gxp_dma_unmap_page(struct gxp_dev *gxp, uint core_list,
- dma_addr_t dma_addr, size_t size,
+void gxp_dma_unmap_page(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction, unsigned long attrs);
/**
* gxp_dma_map_resource() - Create a mapping for an MMIO resource
* @gxp: The GXP device to map the resource for
- * @core_list: A bitfield enumerating the physical cores the mapping is for
+ * @vd: The virtual device including the virtual cores the mapping is for
+ * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
* @phys_addr: The physical address of the MMIO resource to map
* @size: The size of the MMIO region to map, in bytes
* @direction: DMA direction
@@ -221,29 +340,38 @@ void gxp_dma_unmap_page(struct gxp_dev *gxp, uint core_list,
* @gxp_dma_flags: The type of mapping to create; Currently unused
*
* Return: The IOVA the MMIO resource was mapped to
+ *
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
*/
-dma_addr_t gxp_dma_map_resource(struct gxp_dev *gxp, uint core_list,
- phys_addr_t phys_addr, size_t size,
- enum dma_data_direction direction,
+dma_addr_t gxp_dma_map_resource(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list, phys_addr_t phys_addr,
+ size_t size, enum dma_data_direction direction,
unsigned long attrs, uint gxp_dma_flags);
/**
* gxp_dma_unmap_resource() - Unmap an MMIO resource
* @gxp: The GXP device the MMIO resource was mapped for
- * @core_list: A bitfield enumerating the physical cores the mapping is for
+ * @vd: The virtual device including the virtual cores the mapping was for
+ * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
* @dma_addr: The device IOVA, returned by `gxp_dma_map_resource()`
* @size: The size of the mapping, which was passed to `gxp_dma_map_resource()`
* @direction: DMA direction; Same as passed to `gxp_dma_map_resource()`
* @attrs: The same set of flags used by the base DMA API
+ *
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
*/
-void gxp_dma_unmap_resource(struct gxp_dev *gxp, uint core_list,
- dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction,
+void gxp_dma_unmap_resource(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction,
unsigned long attrs);
/**
* gxp_dma_map_sg() - Create a mapping for a scatter-gather list
* @gxp: The GXP device to map the scatter-gather list for
- * @core_list: A bitfield enumerating the physical cores the mapping is for
+ * @vd: The virtual device including the virtual cores the mapping is for
+ * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
* @sg: The scatter-gather list of the buffer to be mapped
* @nents: The number of entries in @sg
* @direction: DMA direction
@@ -251,22 +379,30 @@ void gxp_dma_unmap_resource(struct gxp_dev *gxp, uint core_list,
* @gxp_dma_flags: The type of mapping to create; Currently unused
*
* Return: The number of scatter-gather entries mapped to
+ *
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
*/
-int gxp_dma_map_sg(struct gxp_dev *gxp, uint core_list, struct scatterlist *sg,
- int nents, enum dma_data_direction direction,
- unsigned long attrs, uint gxp_dma_flags);
+int gxp_dma_map_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ int virt_core_list, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction, unsigned long attrs,
+ uint gxp_dma_flags);
/**
* gxp_dma_unmap_sg() - Unmap a scatter-gather list
* @gxp: The GXP device the scatter-gather list was mapped for
- * @core_list: A bitfield enumerating the physical cores the mapping is for
+ * @vd: The virtual device including the virtual cores the mapping was for
+ * @virt_core_list: A bitfield enumerating the virtual cores the mapping was for
* @sg: The scatter-gather list to unmap; The same one passed to
* `gxp_dma_map_sg()`
* @nents: The number of entries in @sg; Same value passed to `gxp_dma_map_sg()`
* @direction: DMA direction; Same as passed to `gxp_dma_map_sg()`
* @attrs: The same set of flags used by the base DMA API
+ *
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
*/
-void gxp_dma_unmap_sg(struct gxp_dev *gxp, uint core_list,
- struct scatterlist *sg, int nents,
+void gxp_dma_unmap_sg(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core_list, struct scatterlist *sg, int nents,
enum dma_data_direction direction, unsigned long attrs);
/**
@@ -312,30 +448,40 @@ void gxp_dma_sync_sg_for_device(struct gxp_dev *gxp, struct scatterlist *sg,
/**
* gxp_dma_map_dmabuf_attachment() - Create a mapping for a dma-buf
* @gxp: The GXP device to map the dma-buf for
- * @core_list: A bitfield enumerating the physical cores the mapping is for
+ * @vd: The virtual device including the virtual cores the dma-buf is for
+ * @virt_core_list: A bitfield enumerating the virtual cores the dma-buf is for
* @attachment: An attachment, representing the dma-buf, obtained from
* `dma_buf_attach()`
* @direction: DMA direction
*
* Return: A scatter-gather table describing the mapping of the dma-buf
* into the default IOMMU domain. Returns ERR_PTR on failure.
+ *
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
*/
-struct sg_table *
-gxp_dma_map_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
- struct dma_buf_attachment *attachment,
- enum dma_data_direction direction);
+struct sg_table *gxp_dma_map_dmabuf_attachment(
+ struct gxp_dev *gxp, struct gxp_virtual_device *vd, uint virt_core_list,
+ struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction);
/**
* gxp_dma_unmap_dmabuf_attachment() - Unmap a dma-buf
* @gxp: The GXP device the dma-buf was mapped for
- * @core_list: A bitfield enumerating the physical cores the mapping is for
+ * @vd: The virtual device including the virtual cores the dma-buf was for
+ * @virt_core_list: A bitfield enumerating the virtual cores the dma-buf was for
* @attachment: The attachment, representing the dma-buf, that was passed to
* `gxp_dma_map_dmabuf_attachment()` to create the mapping
* @sgt: The scatter-gather table returned by `gxp_dma_map_dmabuf_attachment()`
* when mapping this dma-buf
* @direction: DMA direction
+ *
+ * The caller must make sure @vd will not be released for the duration of the
+ * call.
*/
-void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp, uint core_list,
+void gxp_dma_unmap_dmabuf_attachment(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list,
struct dma_buf_attachment *attachment,
struct sg_table *sgt,
enum dma_data_direction direction);
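Taken together, the reworked prototypes tie every mapping to a gxp_virtual_device plus a virtual-core bitfield rather than a physical core list. A hedged usage sketch of the coherent-allocation pair under that model; the helper name, PAGE_SIZE length, and two-core bitfield are illustrative, and the NULL check assumes the documented allocation-failure behaviour:

    #include <linux/bits.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    #include "gxp-dma.h"
    #include "gxp-vd.h"

    /* Sketch: allocate a buffer visible to virtual cores 0 and 1 of @vd, then
     * release it. Error handling is trimmed for brevity. */
    static int sketch_shared_buffer(struct gxp_dev *gxp,
                                    struct gxp_virtual_device *vd)
    {
            uint virt_core_list = BIT(0) | BIT(1);
            dma_addr_t daddr;
            void *vaddr;

            vaddr = gxp_dma_alloc_coherent(gxp, vd, virt_core_list, PAGE_SIZE,
                                           &daddr, GFP_KERNEL, 0);
            if (!vaddr)
                    return -ENOMEM;

            /* ... hand daddr to the cores, use vaddr from the kernel ... */

            gxp_dma_free_coherent(gxp, vd, virt_core_list, PAGE_SIZE, vaddr,
                                  daddr);
            return 0;
    }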
diff --git a/gxp-dmabuf.c b/gxp-dmabuf.c
index 5d0cf9b..c3ef1eb 100644
--- a/gxp-dmabuf.c
+++ b/gxp-dmabuf.c
@@ -29,8 +29,10 @@ struct gxp_dmabuf_mapping {
struct sg_table *sgt;
};
-struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp, uint core_list, int fd,
- u32 flags, enum dma_data_direction dir)
+struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list, int fd, u32 flags,
+ enum dma_data_direction dir)
{
struct dma_buf *dmabuf;
struct dma_buf_attachment *attachment;
@@ -53,7 +55,7 @@ struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp, uint core_list, int fd,
goto err_attach;
}
- sgt = gxp_dma_map_dmabuf_attachment(gxp, core_list, attachment, dir);
+ sgt = gxp_dma_map_dmabuf_attachment(gxp, vd, virt_core_list, attachment, dir);
if (IS_ERR(sgt)) {
dev_err(gxp->dev,
"Failed to map dma-buf attachment (ret=%ld)\n",
@@ -70,7 +72,7 @@ struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp, uint core_list, int fd,
/* dma-buf mappings are indicated by a host_address of 0 */
dmabuf_mapping->mapping.host_address = 0;
- dmabuf_mapping->mapping.core_list = core_list;
+ dmabuf_mapping->mapping.virt_core_list = virt_core_list;
dmabuf_mapping->mapping.device_address = sg_dma_address(sgt->sgl);
dmabuf_mapping->mapping.dir = dir;
dmabuf_mapping->dmabuf = dmabuf;
@@ -88,7 +90,7 @@ struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp, uint core_list, int fd,
err_put_mapping:
kfree(dmabuf_mapping);
err_alloc_mapping:
- gxp_dma_unmap_dmabuf_attachment(gxp, core_list, attachment, sgt, dir);
+ gxp_dma_unmap_dmabuf_attachment(gxp, vd, virt_core_list, attachment, sgt, dir);
err_map_attachment:
dma_buf_detach(dmabuf, attachment);
err_attach:
@@ -96,7 +98,8 @@ err_attach:
return ERR_PTR(ret);
}
-void gxp_dmabuf_unmap(struct gxp_dev *gxp, dma_addr_t device_address)
+void gxp_dmabuf_unmap(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ dma_addr_t device_address)
{
struct gxp_dmabuf_mapping *dmabuf_mapping;
struct gxp_mapping *mapping;
@@ -118,7 +121,7 @@ void gxp_dmabuf_unmap(struct gxp_dev *gxp, dma_addr_t device_address)
dmabuf_mapping =
container_of(mapping, struct gxp_dmabuf_mapping, mapping);
- gxp_dma_unmap_dmabuf_attachment(gxp, mapping->core_list,
+ gxp_dma_unmap_dmabuf_attachment(gxp, vd, mapping->virt_core_list,
dmabuf_mapping->attachment,
dmabuf_mapping->sgt, mapping->dir);
dma_buf_detach(dmabuf_mapping->dmabuf, dmabuf_mapping->attachment);
diff --git a/gxp-dmabuf.h b/gxp-dmabuf.h
index 87ac7cc..bff95ea 100644
--- a/gxp-dmabuf.h
+++ b/gxp-dmabuf.h
@@ -16,7 +16,8 @@
/**
* gxp_dmabuf_map() - Map a dma-buf for access by the specified physical cores
* @gxp: The GXP device to map the dma-buf for
- * @core_list: A bitfield enumerating the physical cores the mapping is for
+ * @vd: The virtual device including the virtual cores the dma-buf is mapped for
+ * @virt_core_list: A bitfield enumerating the virtual cores the mapping is for
* @fd: A file descriptor for the dma-buf to be mapped
* @flags: The type of mapping to create; Currently unused
* @direction: DMA direction
@@ -24,16 +25,20 @@
* Return: The structure that was created and is being tracked to describe the
* mapping of the dma-buf. Returns ERR_PTR on failure.
*/
-struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp, uint core_list, int fd,
- u32 flags, enum dma_data_direction dir);
+struct gxp_mapping *gxp_dmabuf_map(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list, int fd, u32 flags,
+ enum dma_data_direction dir);
/**
* gxp_dmabuf_unmap - Unmap a dma-buf previously mapped with `gxp_dmabuf_map()`
* @gxp: The GXP device the dma-buf was mapped for.
+ * @vd: The virtual device including the virtual cores the dma-buf was mapped for
* @device_address: The IOVA the dma-buf was mapped to. Should be obtained from
* the `device_address` field of the `struct gxp_mapping`
* returned by `gxp_dmabuf_map()`
*/
-void gxp_dmabuf_unmap(struct gxp_dev *gxp, dma_addr_t device_address);
+void gxp_dmabuf_unmap(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ dma_addr_t device_address);
#endif /* __GXP_DMABUF_H__ */
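A hedged sketch of how the pair above is intended to be used once mappings are per virtual device: import a dma-buf fd for one virtual core, hand the resulting IOVA to firmware, then unmap by that IOVA. The helper name, single-core bitfield, and DMA direction are illustrative:

    #include <linux/bits.h>
    #include <linux/dma-direction.h>
    #include <linux/err.h>

    #include "gxp-dmabuf.h"
    #include "gxp-mapping.h"

    /* Sketch: map a user-supplied dma-buf fd for virtual core 0 and later drop
     * the mapping via the IOVA recorded in the returned gxp_mapping. */
    static int sketch_import_dmabuf(struct gxp_dev *gxp,
                                    struct gxp_virtual_device *vd, int fd)
    {
            struct gxp_mapping *mapping;

            mapping = gxp_dmabuf_map(gxp, vd, BIT(0), fd, /*flags=*/0,
                                     DMA_BIDIRECTIONAL);
            if (IS_ERR(mapping))
                    return PTR_ERR(mapping);

            /* ... pass mapping->device_address to the core ... */

            gxp_dmabuf_unmap(gxp, vd, mapping->device_address);
            return 0;
    }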
diff --git a/gxp-eventfd.c b/gxp-eventfd.c
new file mode 100644
index 0000000..ed0170f
--- /dev/null
+++ b/gxp-eventfd.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GXP eventfd
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+
+#include <linux/eventfd.h>
+#include <linux/refcount.h>
+#include <linux/slab.h>
+
+#include "gxp-eventfd.h"
+
+struct gxp_eventfd {
+ struct eventfd_ctx *ctx;
+ refcount_t refcount;
+};
+
+struct gxp_eventfd *gxp_eventfd_create(int fd)
+{
+ struct gxp_eventfd *efd;
+ int err;
+
+ efd = kmalloc(sizeof(*efd), GFP_KERNEL);
+ if (!efd)
+ return ERR_PTR(-ENOMEM);
+
+ efd->ctx = eventfd_ctx_fdget(fd);
+ if (IS_ERR(efd->ctx)) {
+ err = PTR_ERR(efd->ctx);
+ goto error;
+ }
+
+ refcount_set(&efd->refcount, 1);
+
+ return efd;
+
+error:
+ kfree(efd);
+ return ERR_PTR(err);
+}
+
+bool gxp_eventfd_get(struct gxp_eventfd *eventfd)
+{
+ return refcount_inc_not_zero(&eventfd->refcount);
+}
+
+bool gxp_eventfd_put(struct gxp_eventfd *eventfd)
+{
+ bool refcount_is_zero;
+
+ refcount_is_zero = refcount_dec_and_test(&eventfd->refcount);
+ if (refcount_is_zero) {
+ eventfd_ctx_put(eventfd->ctx);
+ kfree(eventfd);
+ }
+
+ return refcount_is_zero;
+}
+
+bool gxp_eventfd_signal(struct gxp_eventfd *eventfd)
+{
+ bool ret;
+
+ ret = gxp_eventfd_get(eventfd);
+ if (ret)
+ eventfd_signal(eventfd->ctx, 1);
+
+ gxp_eventfd_put(eventfd);
+
+ return ret;
+}
diff --git a/gxp-eventfd.h b/gxp-eventfd.h
new file mode 100644
index 0000000..6a23200
--- /dev/null
+++ b/gxp-eventfd.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GXP eventfd
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+#ifndef __GXP_EVENTFD_H__
+#define __GXP_EVENTFD_H__
+
+#include "gxp-internal.h"
+
+struct gxp_eventfd;
+
+/**
+ * gxp_eventfd_create() - Open and initialize an eventfd
+ * @fd: A file descriptor from user-space describing an eventfd
+ *
+ * If successful, the gxp_eventfd will be returned with a reference count of 1.
+ *
+ * Return: A pointer to the new gxp_eventfd or an ERR_PTR on failure
+ * * -ENOMEM: Insufficient memory to create the gxp_eventfd
+ * * other: Failed to obtain an eventfd from @fd
+ */
+struct gxp_eventfd *gxp_eventfd_create(int fd);
+
+/**
+ * gxp_eventfd_get() - Increment an existing gxp_eventfd's reference count
+ * @eventfd: The gxp_eventfd to get a reference to
+ *
+ * Return: true on success, false if the eventfd's reference count was already 0
+ */
+bool gxp_eventfd_get(struct gxp_eventfd *eventfd);
+
+/**
+ * gxp_eventfd_put() - Decrement an eventfd's reference count
+ * @eventfd: The gxp_eventfd to close a reference to, and potentially free
+ *
+ * If the reference count drops to 0, the @eventfd will be freed.
+ *
+ * Return: true if the reference count dropped to 0 and the gxp_eventfd was
+ * released, otherwise false
+ */
+bool gxp_eventfd_put(struct gxp_eventfd *eventfd);
+
+/**
+ * gxp_eventfd_signal() - Signal an eventfd.
+ * @eventfd: The gxp_eventfd to signal
+ *
+ * Return: true on success, false if the gxp_eventfd had a reference count of 0
+ */
+bool gxp_eventfd_signal(struct gxp_eventfd *eventfd);
+
+#endif /* __GXP_EVENTFD_H__ */
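A short usage sketch of the interface above, assuming the fd arrives from user space via an IOCTL (the helper name is illustrative): create takes the initial reference, signal wakes any waiter, and put drops the driver's reference.

    #include <linux/err.h>

    #include "gxp-eventfd.h"

    static int sketch_notify_user(int fd)
    {
            struct gxp_eventfd *efd;

            efd = gxp_eventfd_create(fd);   /* returned with refcount 1 */
            if (IS_ERR(efd))
                    return PTR_ERR(efd);

            gxp_eventfd_signal(efd);        /* wakes any process polling the fd */

            gxp_eventfd_put(efd);           /* frees the wrapper once the count hits 0 */
            return 0;
    }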
diff --git a/gxp-firmware-data.c b/gxp-firmware-data.c
index 2f0c84c..0cfc7b4 100644
--- a/gxp-firmware-data.c
+++ b/gxp-firmware-data.c
@@ -9,12 +9,11 @@
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
-#include "gxp.h"
#include "gxp-firmware-data.h"
#include "gxp-host-device-structs.h"
#include "gxp-internal.h"
#include "gxp-range-alloc.h"
-#include "gxp-tmp.h"
+#include "gxp.h"
/*
* The minimum alignment order (power of 2) of allocations in the firmware data
@@ -27,11 +26,9 @@
/* IDs for dedicated doorbells used by some system components */
#define DOORBELL_ID_CORE_WAKEUP 0
-#define DOORBELL_ID_SW_MBX(_core_) (31 - _core_)
/* IDs for dedicated sync barriers used by some system components */
#define SYNC_BARRIER_ID_UART 1
-#define SYNC_BARRIER_ID_SW_MBX 15
/* Default application parameters */
#define DEFAULT_APP_ID 1
@@ -73,16 +70,10 @@ struct gxp_fw_data_manager {
/* Doorbells allocator and reserved doorbell IDs */
struct range_alloc *doorbell_allocator;
int cores_wakeup_doorbell;
-#ifdef CONFIG_GXP_USE_SW_MAILBOX
- int core_sw_mailbox_doorbells[NUM_CORES];
-#endif // CONFIG_GXP_USE_SW_MAILBOX
int semaphore_doorbells[NUM_CORES];
/* Sync barriers allocator and reserved sync barrier IDs */
struct range_alloc *sync_barrier_allocator;
-#ifdef CONFIG_GXP_USE_SW_MAILBOX
- int sw_mailbox_barrier;
-#endif // CONFIG_GXP_USE_SW_MAILBOX
int uart_sync_barrier;
int timer_regions_barrier;
int watchdog_region_barrier;
@@ -506,15 +497,6 @@ int gxp_fw_data_init(struct gxp_dev *gxp)
if (res)
goto err;
-#ifdef CONFIG_GXP_USE_SW_MAILBOX
- /* Pinned: SW mailbox doorbells */
- for (i = 0; i < NUM_CORES; i++) {
- mgr->core_sw_mailbox_doorbells[i] = DOORBELL_ID_SW_MBX(i);
- range_alloc_get(mgr->doorbell_allocator,
- mgr->core_sw_mailbox_doorbells[i]);
- }
-#endif // CONFIG_GXP_USE_SW_MAILBOX
-
/* Semaphores operation doorbells */
for (i = 0; i < NUM_CORES; i++) {
range_alloc_get_any(mgr->doorbell_allocator,
@@ -531,15 +513,6 @@ int gxp_fw_data_init(struct gxp_dev *gxp)
if (res)
goto err;
-#ifdef CONFIG_GXP_USE_SW_MAILBOX
- /* Pinned: SW MBX sync barrier */
- mgr->sw_mailbox_barrier = SYNC_BARRIER_ID_SW_MBX;
- res = range_alloc_get(mgr->sync_barrier_allocator,
- mgr->sw_mailbox_barrier);
- if (res)
- goto err;
-#endif // CONFIG_GXP_USE_SW_MAILBOX
-
/* Doorbell regions for all apps */
res = range_alloc_get_any(mgr->sync_barrier_allocator,
&mgr->doorbell_regions_barrier);
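The surviving reservations above show the two allocation styles the firmware-data manager relies on: range_alloc_get() pins a well-known ID (such as DOORBELL_ID_CORE_WAKEUP), while range_alloc_get_any() hands back the first free one. A sketch under the assumption that the helpers behave as used in this hunk; the function and parameter names below are illustrative:

    #include "gxp-range-alloc.h"

    static int sketch_reserve_doorbells(struct range_alloc *doorbells,
                                        int *wakeup_id, int *sema_id)
    {
            int res;

            *wakeup_id = 0;                 /* the pinned core wake-up doorbell */
            res = range_alloc_get(doorbells, *wakeup_id);
            if (res)
                    return res;

            /* Any free doorbell will do for the semaphore path. */
            return range_alloc_get_any(doorbells, sema_id);
    }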
diff --git a/gxp-firmware.c b/gxp-firmware.c
index 2af783e..df4e192 100644
--- a/gxp-firmware.c
+++ b/gxp-firmware.c
@@ -26,7 +26,7 @@
#include "gxp-notification.h"
#include "gxp-pm.h"
#include "gxp-telemetry.h"
-#include "gxp-tmp.h"
+#include "gxp-vd.h"
/* TODO (b/176984045): Clean up gxp-firmware.c */
@@ -303,10 +303,8 @@ out_firmware_unload:
static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
{
u32 offset;
- u32 psm_status;
u32 expected_top_value;
- void __iomem *core_psm_base, *core_scratchpad_base, *addr;
- uint state;
+ void __iomem *core_scratchpad_base;
int ctr;
/* Raise wakeup doorbell */
@@ -316,21 +314,12 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
gxp_doorbell_set(gxp, CORE_WAKEUP_DOORBELL);
/* Wait for core to come up */
- dev_notice(gxp->dev, "Waiting for core %u to power up...\n",
- core);
- core_psm_base =
- ((u8 *)gxp->regs.vaddr) + LPM_BLOCK + CORE_PSM_BASE(core);
+ dev_notice(gxp->dev, "Waiting for core %u to power up...\n", core);
ctr = 1000;
while (ctr) {
- addr = core_psm_base + PSM_STATUS_OFFSET;
- psm_status = (u32) readl(addr); /* 0x60041688 */
- if (psm_status & PSM_STATE_VALID_MASK) {
- state = psm_status & PSM_CURR_STATE_MASK;
- if ((state == PSM_STATE_ACTIVE)
- || (state == PSM_STATE_CLK_GATED))
- break;
- }
- cpu_relax();
+ if (gxp_lpm_is_powered(gxp, core))
+ break;
+ udelay(1 * GXP_TIME_DELAY_FACTOR);
ctr--;
}
@@ -346,18 +335,20 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
core_scratchpad_base = gxp->fwbufs[core].vaddr + AURORA_SCRATCHPAD_OFF;
- ctr = 500;
- offset = SCRATCHPAD_MSG_OFFSET(MSG_CORE_ALIVE);
- while (ctr--) {
- if (readl(core_scratchpad_base + offset) == Q7_ALIVE_MAGIC)
- break;
- msleep(1 * GXP_TIME_DELAY_FACTOR);
- }
/*
* Currently, the hello_world FW writes a magic number
* (Q7_ALIVE_MAGIC) to offset MSG_CORE_ALIVE in the scratchpad
* space as an alive message
*/
+ ctr = 5000;
+ offset = SCRATCHPAD_MSG_OFFSET(MSG_CORE_ALIVE);
+ usleep_range(500 * GXP_TIME_DELAY_FACTOR, 1000 * GXP_TIME_DELAY_FACTOR);
+ while (ctr--) {
+ if (readl(core_scratchpad_base + offset) == Q7_ALIVE_MAGIC)
+ break;
+ usleep_range(1 * GXP_TIME_DELAY_FACTOR,
+ 10 * GXP_TIME_DELAY_FACTOR);
+ }
if (readl(core_scratchpad_base + offset) != Q7_ALIVE_MAGIC) {
dev_err(gxp->dev, "Core %u did not respond!\n", core);
return -EIO;
@@ -377,11 +368,14 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
* handshaking in Gem5.
*/
/* TODO (b/182528386): Fix handshake for verifying TOP access */
+ ctr = 1000;
offset = SCRATCHPAD_MSG_OFFSET(MSG_TOP_ACCESS_OK);
expected_top_value = BIT(0);
-#ifdef CONFIG_GXP_USE_SW_MAILBOX
- expected_top_value |= BIT(31 - core);
-#endif // CONFIG_GXP_USE_SW_MAILBOX
+ while (ctr--) {
+ if (readl(core_scratchpad_base + offset) == expected_top_value)
+ break;
+ udelay(1 * GXP_TIME_DELAY_FACTOR);
+ }
if (readl(core_scratchpad_base + offset) != expected_top_value) {
dev_err(gxp->dev, "TOP access from core %u failed!\n", core);
return -EIO;
@@ -479,7 +473,8 @@ void gxp_fw_destroy(struct gxp_dev *gxp)
*/
}
-int gxp_firmware_run(struct gxp_dev *gxp, uint core)
+int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core, uint core)
{
int ret = 0;
struct work_struct *work;
@@ -503,21 +498,24 @@ int gxp_firmware_run(struct gxp_dev *gxp, uint core)
goto out_firmware_unload;
}
+ /* Switch PLL_CON0_NOC_USER MUX to the normal state to guarantee LPM works */
+ gxp_pm_force_cmu_noc_user_mux_normal(gxp);
ret = gxp_firmware_handshake(gxp, core);
if (ret) {
dev_err(gxp->dev, "Firmware handshake failed on core %u\n",
core);
gxp_pm_core_off(gxp, core);
- goto out_firmware_unload;
+ goto out_check_noc_user_mux;
}
-
- /* Initialize ioctl response queues */
- INIT_LIST_HEAD(&(gxp->mailbox_resp_queues[core]));
- init_waitqueue_head(&(gxp->mailbox_resp_waitqs[core]));
+ /*
+ * Check if we need to set PLL_CON0_NOC_USER MUX to low state for
+ * AUR_READY requested state.
+ */
+ gxp_pm_check_cmu_noc_user_mux(gxp);
/* Initialize mailbox */
- gxp->mailbox_mgr->mailboxes[core] = gxp_mailbox_alloc(gxp->mailbox_mgr,
- core);
+ gxp->mailbox_mgr->mailboxes[core] =
+ gxp_mailbox_alloc(gxp->mailbox_mgr, vd, virt_core, core);
if (IS_ERR(gxp->mailbox_mgr->mailboxes[core])) {
dev_err(gxp->dev,
"Unable to allocate mailbox (core=%u, ret=%ld)\n", core,
@@ -541,16 +539,16 @@ int gxp_firmware_run(struct gxp_dev *gxp, uint core)
return ret;
+out_check_noc_user_mux:
+ gxp_pm_check_cmu_noc_user_mux(gxp);
out_firmware_unload:
gxp_firmware_unload(gxp, core);
return ret;
}
-void gxp_firmware_stop(struct gxp_dev *gxp, uint core)
+void gxp_firmware_stop(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core, uint core)
{
- struct gxp_async_response *cur, *nxt;
- unsigned long flags;
-
if (!(gxp->firmware_running & BIT(core)))
dev_err(gxp->dev, "Firmware is not running on core %u\n", core);
@@ -561,25 +559,10 @@ void gxp_firmware_stop(struct gxp_dev *gxp, uint core)
gxp_notification_unregister_handler(gxp, core,
HOST_NOTIF_TELEMETRY_STATUS);
- gxp_mailbox_release(gxp->mailbox_mgr,
+ gxp_mailbox_release(gxp->mailbox_mgr, vd, virt_core,
gxp->mailbox_mgr->mailboxes[core]);
dev_notice(gxp->dev, "Mailbox %u released\n", core);
- /*
- * TODO(b/226211187) response queues should be owned by VDs
- * This step should not be necessary until a VD is destroyed once the
- * queues are owned directly by the VD and not shared by all users of
- * a physical core.
- */
- /* Flush and free any abandoned responses left in the queue */
- spin_lock_irqsave(&gxp->mailbox_resps_lock, flags);
- list_for_each_entry_safe(cur, nxt, &gxp->mailbox_resp_queues[core],
- list_entry) {
- list_del(&cur->list_entry);
- kfree(cur);
- }
- spin_unlock_irqrestore(&gxp->mailbox_resps_lock, flags);
-
gxp_pm_core_off(gxp, core);
gxp_firmware_unload(gxp, core);
}
diff --git a/gxp-firmware.h b/gxp-firmware.h
index 616289f..6b1dff0 100644
--- a/gxp-firmware.h
+++ b/gxp-firmware.h
@@ -11,6 +11,29 @@
#include "gxp-internal.h"
+#if !IS_ENABLED(CONFIG_GXP_TEST)
+
+#define AURORA_SCRATCHPAD_OFF 0x00F00000 /* Last 1M of ELF load region */
+#define AURORA_SCRATCHPAD_LEN 0x00100000 /* 1M */
+
+#else /* CONFIG_GXP_TEST */
+/* Firmware memory is shrunk in unit tests. */
+#define AURORA_SCRATCHPAD_OFF 0x000F0000
+#define AURORA_SCRATCHPAD_LEN 0x00010000
+
+#endif /* CONFIG_GXP_TEST */
+
+#define Q7_ALIVE_MAGIC 0x55555555
+
+#define CORE_SCRATCHPAD_BASE(_core_) (_core_ << 16)
+#define SCRATCHPAD_MSG_OFFSET(_msg_) (_msg_ << 2)
+
+enum aurora_msg {
+ MSG_CORE_ALIVE,
+ MSG_TOP_ACCESS_OK,
+ MSG_SCRATCHPAD_MAX,
+};
+
static inline bool gxp_is_fw_running(struct gxp_dev *gxp, uint core)
{
return (gxp->firmware_running & BIT(core)) != 0;
@@ -31,10 +54,12 @@ void gxp_fw_destroy(struct gxp_dev *gxp);
* Loads the firmware for the specified core in system memory and powers up the
* core to start FW execution.
*/
-int gxp_firmware_run(struct gxp_dev *gxp, uint core);
+int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core, uint core);
/*
* Shuts down the specified core.
*/
-void gxp_firmware_stop(struct gxp_dev *gxp, uint core);
+void gxp_firmware_stop(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint virt_core, uint core);
#endif /* __GXP_FIRMWARE_H__ */
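With the scratchpad macros now exported here, the handshake address arithmetic in gxp-firmware.c reduces to base-plus-offset: the scratchpad occupies the tail of the core's firmware buffer, and each message is a 32-bit slot (SCRATCHPAD_MSG_OFFSET(_msg_) == _msg_ << 2). A minimal sketch of the alive check, assuming fwbufs[core].vaddr is already mapped (the helper name is illustrative):

    #include <linux/io.h>

    #include "gxp-firmware.h"
    #include "gxp-internal.h"

    /* Sketch: did core @core write the alive magic into its scratchpad slot? */
    static bool sketch_core_reported_alive(struct gxp_dev *gxp, uint core)
    {
            void __iomem *scratchpad =
                    gxp->fwbufs[core].vaddr + AURORA_SCRATCHPAD_OFF;

            return readl(scratchpad + SCRATCHPAD_MSG_OFFSET(MSG_CORE_ALIVE)) ==
                   Q7_ALIVE_MAGIC;
    }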
diff --git a/gxp-hw-mailbox-driver.c b/gxp-hw-mailbox-driver.c
index 4858585..c07047f 100644
--- a/gxp-hw-mailbox-driver.c
+++ b/gxp-hw-mailbox-driver.c
@@ -10,10 +10,9 @@
#include <linux/kthread.h>
#include <linux/of_irq.h>
-#include "gxp-tmp.h"
-#include "gxp-mailbox.h"
#include "gxp-mailbox-driver.h"
#include "gxp-mailbox-regs.h"
+#include "gxp-mailbox.h"
static u32 csr_read(struct gxp_mailbox *mailbox, uint reg_offset)
{
diff --git a/gxp-internal.h b/gxp-internal.h
index c7b66e7..4b5bb7c 100644
--- a/gxp-internal.h
+++ b/gxp-internal.h
@@ -20,11 +20,11 @@
#include <linux/spinlock.h>
#include "gxp-config.h"
-#include "gxp-tmp.h"
/* Holds Client's TPU mailboxes info used during mapping */
struct gxp_tpu_mbx_desc {
uint phys_core_list;
+ uint virt_core_list;
size_t cmdq_size, respq_size;
};
@@ -36,11 +36,6 @@ struct gxp_mapped_resource {
resource_size_t size; /* size in bytes */
};
-struct mailbox_resp_list {
- struct list_head list;
- struct gxp_response *resp;
-};
-
/* Structure to hold TPU device info */
struct gxp_tpu_dev {
struct device *dev;
@@ -71,13 +66,6 @@ struct gxp_dev {
struct gxp_mapped_resource cmu; /* CMU CSRs */
struct gxp_mailbox_manager *mailbox_mgr;
struct gxp_power_manager *power_mgr;
- /*
- * TODO(b/182416287): This should be a rb_tree of lists keyed by
- * virtual device. For now, keep an array of one list per physical core
- */
- struct list_head mailbox_resp_queues[GXP_NUM_CORES];
- wait_queue_head_t mailbox_resp_waitqs[GXP_NUM_CORES];
- spinlock_t mailbox_resps_lock;
struct gxp_debug_dump_manager *debug_dump_mgr;
struct gxp_mapping_root *mappings; /* tree of user mappings */
u32 firmware_running; /* firmware status bitmap */
@@ -142,61 +130,6 @@ static inline void gxp_write_32_core(struct gxp_dev *gxp, uint core,
gxp_write_32(gxp, offset, value);
}
-static inline void gxp_acquire_sync_barrier(struct gxp_dev *gxp, uint index)
-{
- uint barrier_reg_offset;
-
- if (index >= SYNC_BARRIER_COUNT) {
- dev_err(gxp->dev,
- "Attempt to acquire non-existent sync barrier: %d\n",
- index);
- return;
- }
-
- barrier_reg_offset = SYNC_BARRIER_BLOCK + SYNC_BARRIER_BASE(index);
- while (gxp_read_32(gxp, barrier_reg_offset) !=
- SYNC_BARRIER_FREE_VALUE) {
- /*
- * Sleep for the minimum amount.
- * msleep(1~20) may not do what the caller intends, and will
- * often sleep longer (~20 ms actual sleep for any value given
- * in the 1~20ms range).
- */
- msleep(20);
- }
-}
-
-static inline void gxp_release_sync_barrier(struct gxp_dev *gxp, uint index)
-{
- uint barrier_reg_offset;
-
- if (index >= SYNC_BARRIER_COUNT) {
- dev_err(gxp->dev,
- "Attempt to acquire non-existent sync barrier: %d\n",
- index);
- return;
- }
-
- barrier_reg_offset = SYNC_BARRIER_BLOCK + SYNC_BARRIER_BASE(index);
- gxp_write_32(gxp, barrier_reg_offset, 1);
-}
-static inline u32 gxp_read_sync_barrier_shadow(struct gxp_dev *gxp, uint index)
-{
- uint barrier_reg_offset;
-
- if (index >= SYNC_BARRIER_COUNT) {
- dev_err(gxp->dev,
- "Attempt to read non-existent sync barrier: %0u\n",
- index);
- return 0;
- }
-
- barrier_reg_offset = SYNC_BARRIER_BLOCK + SYNC_BARRIER_BASE(index) +
- SYNC_BARRIER_SHADOW_OFFSET;
-
- return gxp_read_32(gxp, barrier_reg_offset);
-}
-
static inline int gxp_acquire_rmem_resource(struct gxp_dev *gxp,
struct resource *r, char *phandle)
{
diff --git a/gxp-lpm.c b/gxp-lpm.c
index aaf37e1..8367375 100644
--- a/gxp-lpm.c
+++ b/gxp-lpm.c
@@ -18,7 +18,6 @@
#include "gxp-doorbell.h"
#include "gxp-internal.h"
#include "gxp-lpm.h"
-#include "gxp-tmp.h"
static void enable_state(struct gxp_dev *gxp, uint psm, uint state)
{
@@ -50,6 +49,17 @@ bool gxp_lpm_is_initialized(struct gxp_dev *gxp, uint psm)
return false;
}
+bool gxp_lpm_is_powered(struct gxp_dev *gxp, uint psm)
+{
+ u32 status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);
+ u32 state;
+
+ if (!(status & PSM_STATE_VALID_MASK))
+ return false;
+ state = status & PSM_CURR_STATE_MASK;
+ return state == LPM_ACTIVE_STATE || state == LPM_CG_STATE;
+}
+
static uint get_state(struct gxp_dev *gxp, uint psm)
{
u32 status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);
@@ -73,7 +83,7 @@ static int set_state_internal(struct gxp_dev *gxp, uint psm, uint target_state)
/* Wait for LPM init done (0x60041688) */
while (i && !(lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET)
& PSM_INIT_DONE_MASK)) {
- cpu_relax();
+ udelay(1 * GXP_TIME_DELAY_FACTOR);
i--;
}
@@ -132,12 +142,11 @@ static int psm_enable(struct gxp_dev *gxp, uint psm)
/* Write PSM start bit */
lpm_write_32_psm(gxp, psm, PSM_START_OFFSET, PSM_START);
- msleep(20 * GXP_TIME_DELAY_FACTOR);
/* Wait for LPM init done (0x60041688) */
while (i && !(lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET)
& PSM_INIT_DONE_MASK)) {
- cpu_relax();
+ udelay(1 * GXP_TIME_DELAY_FACTOR);
i--;
}
@@ -189,15 +198,6 @@ int gxp_lpm_up(struct gxp_dev *gxp, uint core)
gxp_bpm_start(gxp, core);
-#ifdef CONFIG_GXP_USE_SW_MAILBOX
- /*
- * Enable doorbells [28-31] for SW mailbox.
- * TODO (b/182526648): Enable doorbells required for SW mailbox in the
- * driver's alloc function.
- */
- gxp_write_32_core(gxp, core, GXP_REG_COMMON_INT_MASK_0, BIT(31 - core));
-#endif // CONFIG_GXP_USE_SW_MAILBOX
-
return 0;
}
diff --git a/gxp-lpm.h b/gxp-lpm.h
index a1b891a..0858104 100644
--- a/gxp-lpm.h
+++ b/gxp-lpm.h
@@ -46,6 +46,13 @@ enum lpm_state {
#define AUR_DVFS_DEBUG_REQ (1 << 31)
#define AUR_DEBUG_CORE_FREQ (AUR_DVFS_DEBUG_REQ | (3 << 27))
+#define PSM_INIT_DONE_MASK 0x80
+#define PSM_CURR_STATE_MASK 0x0F
+#define PSM_STATE_VALID_MASK 0x10
+
+#define PSM_HW_MODE 0x0
+#define PSM_START 0x1
+
/*
* Initializes the power manager for the first time after block power up.
* The function needs to be called once after a block power up event.
@@ -70,6 +77,11 @@ void gxp_lpm_down(struct gxp_dev *gxp, uint core);
*/
bool gxp_lpm_is_initialized(struct gxp_dev *gxp, uint psm);
+/*
+ * Return whether the specified PSM is powered.
+ */
+bool gxp_lpm_is_powered(struct gxp_dev *gxp, uint psm);
+
static inline u32 lpm_read_32(struct gxp_dev *gxp, uint reg_offset)
{
uint offset = GXP_LPM_BASE + reg_offset;
diff --git a/gxp-mailbox.c b/gxp-mailbox.c
index 405c567..4b32aa4 100644
--- a/gxp-mailbox.c
+++ b/gxp-mailbox.c
@@ -18,7 +18,6 @@
#include "gxp-mailbox.h"
#include "gxp-mailbox-driver.h"
#include "gxp-pm.h"
-#include "gxp-tmp.h"
/* Timeout of 8s by default to account for slower emulation platforms */
static int mbx_timeout = 8000;
@@ -41,20 +40,6 @@ module_param(mbx_timeout, int, 0660);
#define MBOX_RESP_QUEUE_SIZE \
(sizeof(struct gxp_response) * MBOX_RESP_QUEUE_NUM_ENTRIES)
-#ifndef CONFIG_GXP_HAS_SYSMMU
-/* Constants for static queues in systems without a SysMMU */
-
-/*
- * Queues in scratchpad space start at 0x280 to allow 0x180 of space for
- * emulated registers in systems using software mailboxes.
- */
-#define MBOX_CMD_QUEUE_SCRATCHPAD_OFFSET 0x280
-#define MBOX_RESP_QUEUE_SCRATCHPAD_OFFSET \
- (MBOX_CMD_QUEUE_SCRATCHPAD_OFFSET + MBOX_CMD_QUEUE_SIZE)
-#define MBOX_DESCRIPTOR_SCRATCHPAD_OFFSET \
- (MBOX_RESP_QUEUE_SCRATCHPAD_OFFSET + MBOX_RESP_QUEUE_SIZE)
-#endif
-
/*
* Returns the number of elements in a circular queue given its @head, @tail,
* and @queue_size.
@@ -272,11 +257,11 @@ static void gxp_mailbox_handle_response(struct gxp_mailbox *mailbox,
* might consume and free the response before
* this function is done with it.
*/
- if (async_resp->client) {
- gxp_client_signal_mailbox_eventfd(
- async_resp->client,
- mailbox->core_id);
+ if (async_resp->eventfd) {
+ gxp_eventfd_signal(async_resp->eventfd);
+ gxp_eventfd_put(async_resp->eventfd);
}
+
wake_up(async_resp->dest_queue_waitq);
spin_unlock_irqrestore(
@@ -416,7 +401,8 @@ static inline void gxp_mailbox_handle_irq(struct gxp_mailbox *mailbox)
#define _RESPONSE_WORKQUEUE_NAME(_x_) "gxp_responses_" #_x_
#define RESPONSE_WORKQUEUE_NAME(_x_) _RESPONSE_WORKQUEUE_NAME(_x_)
static struct gxp_mailbox *create_mailbox(struct gxp_mailbox_manager *mgr,
- u8 core_id)
+ struct gxp_virtual_device *vd,
+ uint virt_core, u8 core_id)
{
struct gxp_mailbox *mailbox;
@@ -431,7 +417,7 @@ static struct gxp_mailbox *create_mailbox(struct gxp_mailbox_manager *mgr,
/* Allocate and initialize the command queue */
mailbox->cmd_queue = (struct gxp_command *)gxp_dma_alloc_coherent(
- mailbox->gxp, BIT(mailbox->core_id),
+ mailbox->gxp, vd, BIT(virt_core),
sizeof(struct gxp_command) * MBOX_CMD_QUEUE_NUM_ENTRIES,
&(mailbox->cmd_queue_device_addr), GFP_KERNEL, 0);
if (!mailbox->cmd_queue)
@@ -443,7 +429,7 @@ static struct gxp_mailbox *create_mailbox(struct gxp_mailbox_manager *mgr,
/* Allocate and initialize the response queue */
mailbox->resp_queue = (struct gxp_response *)gxp_dma_alloc_coherent(
- mailbox->gxp, BIT(mailbox->core_id),
+ mailbox->gxp, vd, BIT(virt_core),
sizeof(struct gxp_response) * MBOX_RESP_QUEUE_NUM_ENTRIES,
&(mailbox->resp_queue_device_addr), GFP_KERNEL, 0);
if (!mailbox->resp_queue)
@@ -456,7 +442,7 @@ static struct gxp_mailbox *create_mailbox(struct gxp_mailbox_manager *mgr,
/* Allocate and initialize the mailbox descriptor */
mailbox->descriptor =
(struct gxp_mailbox_descriptor *)gxp_dma_alloc_coherent(
- mailbox->gxp, BIT(mailbox->core_id),
+ mailbox->gxp, vd, BIT(virt_core),
sizeof(struct gxp_mailbox_descriptor),
&(mailbox->descriptor_device_addr), GFP_KERNEL, 0);
if (!mailbox->descriptor)
@@ -477,18 +463,18 @@ static struct gxp_mailbox *create_mailbox(struct gxp_mailbox_manager *mgr,
return mailbox;
err_workqueue:
- gxp_dma_free_coherent(mailbox->gxp, BIT(mailbox->core_id),
+ gxp_dma_free_coherent(mailbox->gxp, vd, BIT(virt_core),
sizeof(struct gxp_mailbox_descriptor),
mailbox->descriptor,
mailbox->descriptor_device_addr);
err_descriptor:
gxp_dma_free_coherent(
- mailbox->gxp, BIT(mailbox->core_id),
+ mailbox->gxp, vd, BIT(virt_core),
sizeof(struct gxp_response) * mailbox->resp_queue_size,
mailbox->resp_queue, mailbox->resp_queue_device_addr);
err_resp_queue:
gxp_dma_free_coherent(
- mailbox->gxp, BIT(mailbox->core_id),
+ mailbox->gxp, vd, BIT(virt_core),
sizeof(struct gxp_command) * mailbox->cmd_queue_size,
mailbox->cmd_queue, mailbox->cmd_queue_device_addr);
err_cmd_queue:
@@ -520,11 +506,12 @@ static void enable_mailbox(struct gxp_mailbox *mailbox)
}
struct gxp_mailbox *gxp_mailbox_alloc(struct gxp_mailbox_manager *mgr,
- u8 core_id)
+ struct gxp_virtual_device *vd,
+ uint virt_core, u8 core_id)
{
struct gxp_mailbox *mailbox;
- mailbox = create_mailbox(mgr, core_id);
+ mailbox = create_mailbox(mgr, vd, virt_core, core_id);
if (IS_ERR(mailbox))
return mailbox;
@@ -534,7 +521,8 @@ struct gxp_mailbox *gxp_mailbox_alloc(struct gxp_mailbox_manager *mgr,
}
void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
- struct gxp_mailbox *mailbox)
+ struct gxp_virtual_device *vd, uint virt_core,
+ struct gxp_mailbox *mailbox)
{
int i;
struct gxp_mailbox_wait_list *cur, *nxt;
@@ -622,14 +610,14 @@ void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
/* Clean up resources */
gxp_dma_free_coherent(
- mailbox->gxp, BIT(mailbox->core_id),
+ mailbox->gxp, vd, BIT(virt_core),
sizeof(struct gxp_command) * mailbox->cmd_queue_size,
mailbox->cmd_queue, mailbox->cmd_queue_device_addr);
gxp_dma_free_coherent(
- mailbox->gxp, BIT(mailbox->core_id),
+ mailbox->gxp, vd, BIT(virt_core),
sizeof(struct gxp_response) * mailbox->resp_queue_size,
mailbox->resp_queue, mailbox->resp_queue_device_addr);
- gxp_dma_free_coherent(mailbox->gxp, BIT(mailbox->core_id),
+ gxp_dma_free_coherent(mailbox->gxp, vd, BIT(virt_core),
sizeof(struct gxp_mailbox_descriptor),
mailbox->descriptor,
mailbox->descriptor_device_addr);
@@ -820,10 +808,9 @@ static void async_cmd_timeout_work(struct work_struct *work)
async_resp->gxp_power_state,
async_resp->requested_aggressor, AUR_OFF, true);
- if (async_resp->client) {
- gxp_client_signal_mailbox_eventfd(
- async_resp->client,
- async_resp->mailbox->core_id);
+ if (async_resp->eventfd) {
+ gxp_eventfd_signal(async_resp->eventfd);
+ gxp_eventfd_put(async_resp->eventfd);
}
wake_up(async_resp->dest_queue_waitq);
@@ -839,7 +826,7 @@ int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
wait_queue_head_t *queue_waitq,
uint gxp_power_state, uint memory_power_state,
bool requested_aggressor,
- struct gxp_client *client)
+ struct gxp_eventfd *eventfd)
{
struct gxp_async_response *async_resp;
int ret;
@@ -854,8 +841,11 @@ int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
async_resp->dest_queue_waitq = queue_waitq;
async_resp->gxp_power_state = gxp_power_state;
async_resp->memory_power_state = memory_power_state;
- async_resp->client = client;
async_resp->requested_aggressor = requested_aggressor;
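+ /* Hold a reference on the eventfd so it stays valid until the response is handled or times out */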
+ if (eventfd && gxp_eventfd_get(eventfd))
+ async_resp->eventfd = eventfd;
+ else
+ async_resp->eventfd = NULL;
INIT_DELAYED_WORK(&async_resp->timeout_work, async_cmd_timeout_work);
schedule_delayed_work(&async_resp->timeout_work,
diff --git a/gxp-mailbox.h b/gxp-mailbox.h
index 933c177..6900607 100644
--- a/gxp-mailbox.h
+++ b/gxp-mailbox.h
@@ -91,10 +91,10 @@ struct gxp_async_response {
uint gxp_power_state;
/* Specified memory power state vote during the command execution */
uint memory_power_state;
- /* gxp_client to signal when the response completes. May be NULL */
- struct gxp_client *client;
/* Specified whether the power state vote is requested with aggressor flag */
bool requested_aggressor;
+ /* gxp_eventfd to signal when the response completes. May be NULL */
+ struct gxp_eventfd *eventfd;
};
enum gxp_response_status {
@@ -179,8 +179,10 @@ struct gxp_mailbox_manager *gxp_mailbox_create_manager(struct gxp_dev *gxp,
*/
struct gxp_mailbox *gxp_mailbox_alloc(struct gxp_mailbox_manager *mgr,
- u8 core_id);
+ struct gxp_virtual_device *vd,
+ uint virt_core, u8 core_id);
void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
+ struct gxp_virtual_device *vd, uint virt_core,
struct gxp_mailbox *mailbox);
void gxp_mailbox_reset(struct gxp_mailbox *mailbox);
@@ -195,7 +197,7 @@ int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
wait_queue_head_t *queue_waitq,
uint gxp_power_state, uint memory_power_state,
bool requested_aggressor,
- struct gxp_client *client);
+ struct gxp_eventfd *eventfd);
int gxp_mailbox_register_interrupt_handler(struct gxp_mailbox *mailbox,
u32 int_bit,
diff --git a/gxp-mapping.c b/gxp-mapping.c
index 5db126b..8f9359e 100644
--- a/gxp-mapping.c
+++ b/gxp-mapping.c
@@ -27,8 +27,10 @@ int gxp_mapping_init(struct gxp_dev *gxp)
return 0;
}
-struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp, uint core_list,
- u64 user_address, size_t size, u32 flags,
+struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list, u64 user_address,
+ size_t size, u32 flags,
enum dma_data_direction dir)
{
struct gxp_mapping *mapping = NULL;
@@ -90,7 +92,8 @@ struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp, uint core_list,
goto error_unpin_pages;
}
mapping->host_address = user_address;
- mapping->core_list = core_list;
+ mapping->virt_core_list = virt_core_list;
+ mapping->vd = vd;
mapping->size = size;
mapping->map_count = 1;
mapping->gxp_dma_flags = flags;
@@ -104,8 +107,8 @@ struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp, uint core_list,
}
/* map the user pages */
- ret = gxp_dma_map_sg(gxp, mapping->core_list, mapping->sgt.sgl,
- mapping->sgt.nents, mapping->dir,
+ ret = gxp_dma_map_sg(gxp, mapping->vd, mapping->virt_core_list,
+ mapping->sgt.sgl, mapping->sgt.nents, mapping->dir,
DMA_ATTR_SKIP_CPU_SYNC, mapping->gxp_dma_flags);
if (!ret) {
dev_dbg(gxp->dev, "Failed to map sgt (ret=%d)\n", ret);
@@ -144,9 +147,9 @@ void gxp_mapping_destroy(struct gxp_dev *gxp, struct gxp_mapping *mapping)
* user requires a mapping be synced before unmapping, they are
* responsible for calling `gxp_mapping_sync()` before hand.
*/
- gxp_dma_unmap_sg(gxp, mapping->core_list, mapping->sgt.sgl,
- mapping->sgt.orig_nents, mapping->dir,
- DMA_ATTR_SKIP_CPU_SYNC);
+ gxp_dma_unmap_sg(gxp, mapping->vd, mapping->virt_core_list,
+ mapping->sgt.sgl, mapping->sgt.orig_nents,
+ mapping->dir, DMA_ATTR_SKIP_CPU_SYNC);
/* Unpin the user pages */
for_each_sg_page(mapping->sgt.sgl, &sg_iter, mapping->sgt.orig_nents,
@@ -255,8 +258,8 @@ int gxp_mapping_put(struct gxp_dev *gxp, struct gxp_mapping *map)
{
struct rb_node **link;
struct rb_node *parent = NULL;
- u64 device_address = map->device_address;
- struct gxp_mapping *this;
+ dma_addr_t device_address = map->device_address;
+ struct gxp_mapping *mapping;
link = &gxp->mappings->rb.rb_node;
@@ -265,11 +268,11 @@ int gxp_mapping_put(struct gxp_dev *gxp, struct gxp_mapping *map)
/* Figure out where to put new node */
while (*link) {
parent = *link;
- this = rb_entry(parent, struct gxp_mapping, node);
+ mapping = rb_entry(parent, struct gxp_mapping, node);
- if (this->device_address > device_address)
+ if (mapping->device_address > device_address)
link = &(*link)->rb_left;
- else if (this->device_address < device_address)
+ else if (mapping->device_address < device_address)
link = &(*link)->rb_right;
else
goto out;
@@ -285,42 +288,43 @@ int gxp_mapping_put(struct gxp_dev *gxp, struct gxp_mapping *map)
out:
mutex_unlock(&gxp->mappings->lock);
- dev_err(gxp->dev, "Duplicate mapping: 0x%llx", map->device_address);
+ dev_err(gxp->dev, "Duplicate mapping: %pad", &map->device_address);
return -EINVAL;
}
-struct gxp_mapping *gxp_mapping_get(struct gxp_dev *gxp, u64 device_address)
+struct gxp_mapping *gxp_mapping_get(struct gxp_dev *gxp,
+ dma_addr_t device_address)
{
struct rb_node *node;
- struct gxp_mapping *this;
+ struct gxp_mapping *mapping;
mutex_lock(&gxp->mappings->lock);
node = gxp->mappings->rb.rb_node;
while (node) {
- this = rb_entry(node, struct gxp_mapping, node);
+ mapping = rb_entry(node, struct gxp_mapping, node);
- if (this->device_address > device_address) {
+ if (mapping->device_address > device_address) {
node = node->rb_left;
- } else if (this->device_address < device_address) {
+ } else if (mapping->device_address < device_address) {
node = node->rb_right;
} else {
mutex_unlock(&gxp->mappings->lock);
- return this; /* Found it */
+ return mapping; /* Found it */
}
}
mutex_unlock(&gxp->mappings->lock);
- dev_err(gxp->dev, "Mapping not found: 0x%llx", device_address);
+ dev_err(gxp->dev, "Mapping not found: %pad", &device_address);
return NULL;
}
struct gxp_mapping *gxp_mapping_get_host(struct gxp_dev *gxp, u64 host_address)
{
struct rb_node *node;
- struct gxp_mapping *this;
+ struct gxp_mapping *mapping;
mutex_lock(&gxp->mappings->lock);
@@ -332,10 +336,10 @@ struct gxp_mapping *gxp_mapping_get_host(struct gxp_dev *gxp, u64 host_address)
/* Iterate through the elements in the rbtree */
for (node = rb_first(&gxp->mappings->rb); node; node = rb_next(node)) {
- this = rb_entry(node, struct gxp_mapping, node);
- if (this->host_address == host_address) {
+ mapping = rb_entry(node, struct gxp_mapping, node);
+ if (mapping->host_address == host_address) {
mutex_unlock(&gxp->mappings->lock);
- return this;
+ return mapping;
}
}
diff --git a/gxp-mapping.h b/gxp-mapping.h
index 3fe6293..0e5c869 100644
--- a/gxp-mapping.h
+++ b/gxp-mapping.h
@@ -28,7 +28,8 @@ struct gxp_mapping {
* should not be used if a regular buffer mapping was expected.
*/
u64 host_address;
- uint core_list;
+ uint virt_core_list;
+ struct gxp_virtual_device *vd;
/*
* `device_address` and `size` are the base address and size of the
* user buffer a mapping represents.
@@ -46,14 +47,17 @@ struct gxp_mapping {
};
int gxp_mapping_init(struct gxp_dev *gxp);
-struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp, uint core_list,
- u64 user_address, size_t size, u32 flags,
+struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core_list, u64 user_address,
+ size_t size, u32 flags,
enum dma_data_direction dir);
void gxp_mapping_destroy(struct gxp_dev *gxp, struct gxp_mapping *mapping);
int gxp_mapping_sync(struct gxp_dev *gxp, struct gxp_mapping *mapping,
u32 offset, u32 size, bool for_cpu);
int gxp_mapping_put(struct gxp_dev *gxp, struct gxp_mapping *map);
-struct gxp_mapping *gxp_mapping_get(struct gxp_dev *gxp, u64 device_address);
+struct gxp_mapping *gxp_mapping_get(struct gxp_dev *gxp,
+ dma_addr_t device_address);
struct gxp_mapping *gxp_mapping_get_host(struct gxp_dev *gxp, u64 host_address);
void gxp_mapping_remove(struct gxp_dev *gxp, struct gxp_mapping *map);
diff --git a/gxp-platform.c b/gxp-platform.c
index f702157..68f04d0 100644
--- a/gxp-platform.c
+++ b/gxp-platform.c
@@ -128,7 +128,6 @@ static int gxp_map_buffer(struct gxp_client *client,
struct gxp_map_ioctl ibuf;
struct gxp_mapping *map;
int ret = 0;
- uint phys_core_list;
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
@@ -141,49 +140,21 @@ static int gxp_map_buffer(struct gxp_client *client,
"Mapped buffers must be cache line aligned and padded.\n");
return -EINVAL;
}
+ if (ibuf.virtual_core_list == 0)
+ return -EINVAL;
- /* Caller must hold VIRTUAL_DEVICE wakelock */
down_read(&client->semaphore);
- if (!client->has_vd_wakelock) {
+ if (!client->vd) {
dev_err(gxp->dev,
- "GXP_MAP_BUFFER requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ "GXP_MAP_BUFFER requires the client allocate a VIRTUAL_DEVICE\n");
ret = -ENODEV;
goto out;
}
- phys_core_list = gxp_vd_virt_core_list_to_phys_core_list(
- client->vd, ibuf.virtual_core_list);
- if (phys_core_list == 0) {
- ret = -EINVAL;
- goto out;
- }
-
-#ifndef CONFIG_GXP_HAS_SYSMMU
- /*
- * TODO(b/193272602) On systems without a SysMMU, all attempts to map
- * the same buffer must use the same mapping/bounce buffer or cores
- * may corrupt each others' updates to the buffer. Once mirror mapping
- * is supported, and a buffer can be mapped to multiple cores at once,
- * attempting to remap a buffer can be considered an error and this
- * check removed.
- */
- /* Check if this buffer has already been mapped */
- map = gxp_mapping_get_host(gxp, ibuf.host_address);
- if (map) {
- ibuf.device_address = map->device_address;
- if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
- ret = -EFAULT;
- goto out
- }
-
- map->map_count++;
- goto out;
- }
-#endif
-
- map = gxp_mapping_create(gxp, phys_core_list, ibuf.host_address,
- ibuf.size, /*gxp_dma_flags=*/0,
+ map = gxp_mapping_create(gxp, client->vd, ibuf.virtual_core_list,
+ ibuf.host_address, ibuf.size,
+ /*gxp_dma_flags=*/0,
mapping_flags_to_dma_dir(ibuf.flags));
if (IS_ERR(map)) {
ret = PTR_ERR(map);
@@ -226,17 +197,16 @@ static int gxp_unmap_buffer(struct gxp_client *client,
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
- /* Caller must hold VIRTUAL_DEVICE wakelock */
down_read(&client->semaphore);
- if (!client->has_vd_wakelock) {
+ if (!client->vd) {
dev_err(gxp->dev,
- "GXP_UNMAP_BUFFER requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ "GXP_UNMAP_BUFFER requires the client allocate a VIRTUAL_DEVICE\n");
ret = -ENODEV;
goto out;
}
- map = gxp_mapping_get(gxp, ibuf.device_address);
+ map = gxp_mapping_get(gxp, (dma_addr_t)ibuf.device_address);
if (!map) {
ret = -EINVAL;
goto out;
@@ -270,17 +240,16 @@ static int gxp_sync_buffer(struct gxp_client *client,
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
- /* Caller must hold VIRTUAL_DEVICE wakelock */
down_read(&client->semaphore);
- if (!client->has_vd_wakelock) {
+ if (!client->vd) {
dev_err(gxp->dev,
- "GXP_SYNC_BUFFER requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ "GXP_SYNC_BUFFER requires the client allocate a VIRTUAL_DEVICE\n");
ret = -ENODEV;
goto out;
}
- map = gxp_mapping_get(gxp, ibuf.device_address);
+ map = gxp_mapping_get(gxp, (dma_addr_t)ibuf.device_address);
if (!map) {
ret = -EINVAL;
goto out;
@@ -303,7 +272,7 @@ gxp_mailbox_command_compat(struct gxp_client *client,
struct gxp_mailbox_command_compat_ioctl ibuf;
struct gxp_command cmd;
struct buffer_descriptor buffer;
- int phys_core;
+ int virt_core, phys_core;
int ret = 0;
uint gxp_power_state, memory_power_state;
@@ -319,15 +288,18 @@ gxp_mailbox_command_compat(struct gxp_client *client,
if (!client->has_vd_wakelock) {
dev_err(gxp->dev,
"GXP_MAILBOX_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
- ret = -ENODEV;
- goto out;
+ up_read(&client->semaphore);
+ return -ENODEV;
}
- phys_core = gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
+ down_read(&gxp->vd_semaphore);
+
+ virt_core = ibuf.virtual_core_id;
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virt_core);
if (phys_core < 0) {
dev_err(gxp->dev,
"Mailbox command failed: Invalid virtual core id (%u)\n",
- ibuf.virtual_core_id);
+ virt_core);
ret = -EINVAL;
goto out;
}
@@ -359,13 +331,13 @@ gxp_mailbox_command_compat(struct gxp_client *client,
gxp_power_state = AUR_OFF;
memory_power_state = AUR_MEM_UNDEFINED;
- down_read(&gxp->vd_semaphore);
ret = gxp_mailbox_execute_cmd_async(
gxp->mailbox_mgr->mailboxes[phys_core], &cmd,
- &gxp->mailbox_resp_queues[phys_core], &gxp->mailbox_resps_lock,
- &gxp->mailbox_resp_waitqs[phys_core], gxp_power_state,
- memory_power_state, true, client);
- up_read(&gxp->vd_semaphore);
+ &client->vd->mailbox_resp_queues[virt_core].queue,
+ &client->vd->mailbox_resp_queues[virt_core].lock,
+ &client->vd->mailbox_resp_queues[virt_core].waitq,
+ gxp_power_state, memory_power_state, true,
+ client->mb_eventfds[virt_core]);
if (ret) {
dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
ret);
@@ -380,6 +352,7 @@ gxp_mailbox_command_compat(struct gxp_client *client,
}
out:
+ up_read(&gxp->vd_semaphore);
up_read(&client->semaphore);
return ret;
@@ -392,7 +365,7 @@ static int gxp_mailbox_command(struct gxp_client *client,
struct gxp_mailbox_command_ioctl ibuf;
struct gxp_command cmd;
struct buffer_descriptor buffer;
- int phys_core;
+ int virt_core, phys_core;
int ret = 0;
uint gxp_power_state, memory_power_state;
bool requested_aggressor = false;
@@ -424,15 +397,18 @@ static int gxp_mailbox_command(struct gxp_client *client,
if (!client->has_vd_wakelock) {
dev_err(gxp->dev,
"GXP_MAILBOX_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
- ret = -ENODEV;
- goto out;
+ up_read(&client->semaphore);
+ return -ENODEV;
}
- phys_core = gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
+ down_read(&gxp->vd_semaphore);
+
+ virt_core = ibuf.virtual_core_id;
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virt_core);
if (phys_core < 0) {
dev_err(gxp->dev,
"Mailbox command failed: Invalid virtual core id (%u)\n",
- ibuf.virtual_core_id);
+ virt_core);
ret = -EINVAL;
goto out;
}
@@ -465,13 +441,13 @@ static int gxp_mailbox_command(struct gxp_client *client,
memory_power_state = aur_memory_state_array[ibuf.memory_power_state];
requested_aggressor = (ibuf.power_flags & GXP_POWER_NON_AGGRESSOR) == 0;
- down_read(&gxp->vd_semaphore);
ret = gxp_mailbox_execute_cmd_async(
gxp->mailbox_mgr->mailboxes[phys_core], &cmd,
- &gxp->mailbox_resp_queues[phys_core], &gxp->mailbox_resps_lock,
- &gxp->mailbox_resp_waitqs[phys_core], gxp_power_state,
- memory_power_state, requested_aggressor, client);
- up_read(&gxp->vd_semaphore);
+ &client->vd->mailbox_resp_queues[virt_core].queue,
+ &client->vd->mailbox_resp_queues[virt_core].lock,
+ &client->vd->mailbox_resp_queues[virt_core].waitq,
+ gxp_power_state, memory_power_state, requested_aggressor,
+ client->mb_eventfds[virt_core]);
if (ret) {
dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
ret);
@@ -486,6 +462,7 @@ static int gxp_mailbox_command(struct gxp_client *client,
}
out:
+ up_read(&gxp->vd_semaphore);
up_read(&client->semaphore);
return ret;
@@ -495,10 +472,9 @@ static int gxp_mailbox_response(struct gxp_client *client,
struct gxp_mailbox_response_ioctl __user *argp)
{
struct gxp_dev *gxp = client->gxp;
- u16 virtual_core_id;
struct gxp_mailbox_response_ioctl ibuf;
struct gxp_async_response *resp_ptr;
- int phys_core;
+ int virt_core;
unsigned long flags;
int ret = 0;
@@ -515,23 +491,16 @@ static int gxp_mailbox_response(struct gxp_client *client,
goto out;
}
- virtual_core_id = ibuf.virtual_core_id;
- phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virtual_core_id);
- if (phys_core < 0) {
+ virt_core = ibuf.virtual_core_id;
+ if (virt_core >= client->vd->num_cores) {
dev_err(gxp->dev, "Mailbox response failed: Invalid virtual core id (%u)\n",
- virtual_core_id);
- ret = -EINVAL;
- goto out;
- }
-
- if (!gxp_is_fw_running(gxp, phys_core)) {
- dev_err(gxp->dev, "Cannot process mailbox response for core %d when firmware isn't running\n",
- phys_core);
+ virt_core);
ret = -EINVAL;
goto out;
}
- spin_lock_irqsave(&gxp->mailbox_resps_lock, flags);
+ spin_lock_irqsave(&client->vd->mailbox_resp_queues[virt_core].lock,
+ flags);
/*
* No timeout is required since commands have a hard timeout after
@@ -544,20 +513,26 @@ static int gxp_mailbox_response(struct gxp_client *client,
* proceed per wake event.
*/
wait_event_exclusive_cmd(
- gxp->mailbox_resp_waitqs[phys_core],
- !list_empty(&(gxp->mailbox_resp_queues[phys_core])),
+ client->vd->mailbox_resp_queues[virt_core].waitq,
+ !list_empty(&client->vd->mailbox_resp_queues[virt_core].queue),
/* Release the lock before sleeping */
- spin_unlock_irqrestore(&gxp->mailbox_resps_lock, flags),
+ spin_unlock_irqrestore(
+ &client->vd->mailbox_resp_queues[virt_core].lock,
+ flags),
/* Reacquire the lock after waking */
- spin_lock_irqsave(&gxp->mailbox_resps_lock, flags));
+ spin_lock_irqsave(
+ &client->vd->mailbox_resp_queues[virt_core].lock,
+ flags));
- resp_ptr = list_first_entry(&(gxp->mailbox_resp_queues[phys_core]),
- struct gxp_async_response, list_entry);
+ resp_ptr = list_first_entry(
+ &client->vd->mailbox_resp_queues[virt_core].queue,
+ struct gxp_async_response, list_entry);
/* Pop the front of the response list */
list_del(&(resp_ptr->list_entry));
- spin_unlock_irqrestore(&gxp->mailbox_resps_lock, flags);
+ spin_unlock_irqrestore(&client->vd->mailbox_resp_queues[virt_core].lock,
+ flags);
ibuf.sequence_number = resp_ptr->resp.seq;
switch (resp_ptr->resp.status) {
@@ -691,10 +666,12 @@ gxp_etm_trace_start_command(struct gxp_client *client,
if (!client->has_vd_wakelock) {
dev_err(gxp->dev,
"GXP_ETM_TRACE_START_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
- ret = -ENODEV;
- goto out;
+ up_read(&client->semaphore);
+ return -ENODEV;
}
+ down_read(&gxp->vd_semaphore);
+
phys_core =
gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
if (phys_core < 0) {
@@ -711,6 +688,7 @@ gxp_etm_trace_start_command(struct gxp_client *client,
*/
out:
+ up_read(&gxp->vd_semaphore);
up_read(&client->semaphore);
return ret;
@@ -733,10 +711,11 @@ static int gxp_etm_trace_sw_stop_command(struct gxp_client *client,
if (!client->has_vd_wakelock) {
dev_err(gxp->dev,
"GXP_ETM_TRACE_SW_STOP_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
- ret = -ENODEV;
- goto out;
+ up_read(&client->semaphore);
+ return -ENODEV;
}
+ down_read(&gxp->vd_semaphore);
phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virtual_core_id);
if (phys_core < 0) {
@@ -753,6 +732,7 @@ static int gxp_etm_trace_sw_stop_command(struct gxp_client *client,
*/
out:
+ up_read(&gxp->vd_semaphore);
up_read(&client->semaphore);
return ret;
@@ -775,10 +755,12 @@ static int gxp_etm_trace_cleanup_command(struct gxp_client *client,
if (!client->has_vd_wakelock) {
dev_err(gxp->dev,
"GXP_ETM_TRACE_CLEANUP_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
- ret = -ENODEV;
- goto out;
+ up_read(&client->semaphore);
+ return -ENODEV;
}
+ down_read(&gxp->vd_semaphore);
+
phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virtual_core_id);
if (phys_core < 0) {
dev_err(gxp->dev, "Trace cleanup failed: Invalid virtual core id (%u)\n",
@@ -794,6 +776,7 @@ static int gxp_etm_trace_cleanup_command(struct gxp_client *client,
*/
out:
+ up_read(&gxp->vd_semaphore);
up_read(&client->semaphore);
return ret;
@@ -822,10 +805,12 @@ gxp_etm_get_trace_info_command(struct gxp_client *client,
if (!client->has_vd_wakelock) {
dev_err(gxp->dev,
"GXP_ETM_GET_TRACE_INFO_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
- ret = -ENODEV;
- goto out;
+ up_read(&client->semaphore);
+ return -ENODEV;
}
+ down_read(&gxp->vd_semaphore);
+
phys_core = gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
if (phys_core < 0) {
dev_err(gxp->dev, "Get trace info failed: Invalid virtual core id (%u)\n",
@@ -872,6 +857,7 @@ out_free_header:
kfree(trace_header);
out:
+ up_read(&gxp->vd_semaphore);
up_read(&client->semaphore);
return ret;
@@ -930,16 +916,17 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
- /* Caller must hold VIRTUAL_DEVICE wakelock */
down_write(&client->semaphore);
- if (!client->has_vd_wakelock) {
+ if (!client->vd) {
dev_err(gxp->dev,
- "GXP_MAP_TPU_MBX_QUEUE requires the client hold a VIRTUAL_DEVICE wakelock\n");
- ret = -ENODEV;
- goto out;
+ "GXP_MAP_TPU_MBX_QUEUE requires the client allocate a VIRTUAL_DEVICE\n");
+ up_write(&client->semaphore);
+ return -ENODEV;
}
+ down_read(&gxp->vd_semaphore);
+
virtual_core_list = ibuf.virtual_core_list;
core_count = hweight_long(virtual_core_list);
phys_core_list = gxp_vd_virt_core_list_to_phys_core_list(
@@ -983,7 +970,8 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
mbx_info->cmdq_size = ALIGN(mbx_info->cmdq_size, PAGE_SIZE);
mbx_info->respq_size = ALIGN(mbx_info->respq_size, PAGE_SIZE);
- ret = gxp_dma_map_tpu_buffer(gxp, phys_core_list, mbx_info);
+ ret = gxp_dma_map_tpu_buffer(gxp, client->vd, virtual_core_list,
+ phys_core_list, mbx_info);
if (ret) {
dev_err(gxp->dev, "%s: failed to map TPU mailbox buffer %d\n",
__func__, ret);
@@ -994,6 +982,7 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
goto out_free;
}
client->mbx_desc.phys_core_list = phys_core_list;
+ client->mbx_desc.virt_core_list = virtual_core_list;
client->mbx_desc.cmdq_size = mbx_info->cmdq_size;
client->mbx_desc.respq_size = mbx_info->respq_size;
client->tpu_mbx_allocated = true;
@@ -1002,6 +991,7 @@ out_free:
kfree(mbx_info);
out:
+ up_read(&gxp->vd_semaphore);
up_write(&client->semaphore);
return ret;
@@ -1022,12 +1012,11 @@ static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
- /* Caller must hold VIRTUAL_DEVICE wakelock */
down_write(&client->semaphore);
- if (!client->has_vd_wakelock) {
+ if (!client->vd) {
dev_err(gxp->dev,
- "GXP_UNMAP_TPU_MBX_QUEUE requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ "GXP_UNMAP_TPU_MBX_QUEUE requires the client allocate a VIRTUAL_DEVICE\n");
ret = -ENODEV;
goto out;
}
@@ -1039,7 +1028,7 @@ static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
goto out;
}
- gxp_dma_unmap_tpu_buffer(gxp, client->mbx_desc);
+ gxp_dma_unmap_tpu_buffer(gxp, client->vd, client->mbx_desc);
gxp_tpu_info.tpu_fd = ibuf.tpu_fd;
ret = edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
@@ -1418,29 +1407,24 @@ static int gxp_map_dmabuf(struct gxp_client *client,
struct gxp_map_dmabuf_ioctl ibuf;
struct gxp_mapping *mapping;
int ret = 0;
- uint phys_core_list;
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
- /* Caller must hold VIRTUAL_DEVICE wakelock */
+ if (ibuf.virtual_core_list == 0)
+ return -EINVAL;
+
down_read(&client->semaphore);
- if (!client->has_vd_wakelock) {
+ if (!client->vd) {
dev_err(gxp->dev,
- "GXP_MAP_DMABUF requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ "GXP_MAP_DMABUF requires the client allocate a VIRTUAL_DEVICE\n");
ret = -ENODEV;
goto out;
}
- phys_core_list = gxp_vd_virt_core_list_to_phys_core_list(
- client->vd, ibuf.virtual_core_list);
- if (phys_core_list == 0) {
- ret = -EINVAL;
- goto out;
- }
-
- mapping = gxp_dmabuf_map(gxp, phys_core_list, ibuf.dmabuf_fd,
+ mapping = gxp_dmabuf_map(gxp, client->vd, ibuf.virtual_core_list,
+ ibuf.dmabuf_fd,
/*gxp_dma_flags=*/0,
mapping_flags_to_dma_dir(ibuf.flags));
if (IS_ERR(mapping)) {
@@ -1453,7 +1437,7 @@ static int gxp_map_dmabuf(struct gxp_client *client,
if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
/* If the IOCTL fails, the dma-buf must be unmapped */
- gxp_dmabuf_unmap(gxp, ibuf.device_address);
+ gxp_dmabuf_unmap(gxp, client->vd, ibuf.device_address);
ret = -EFAULT;
}
@@ -1473,17 +1457,16 @@ static int gxp_unmap_dmabuf(struct gxp_client *client,
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
- /* Caller must hold VIRTUAL_DEVICE wakelock */
down_read(&client->semaphore);
- if (!client->has_vd_wakelock) {
+ if (!client->vd) {
dev_err(gxp->dev,
- "GXP_UNMAP_DMABUF requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ "GXP_UNMAP_DMABUF requires the client allocate a VIRTUAL_DEVICE\n");
ret = -ENODEV;
goto out;
}
- gxp_dmabuf_unmap(gxp, ibuf.device_address);
+ gxp_dmabuf_unmap(gxp, client->vd, ibuf.device_address);
out:
up_read(&client->semaphore);
@@ -1496,30 +1479,43 @@ static int gxp_register_mailbox_eventfd(
struct gxp_register_mailbox_eventfd_ioctl __user *argp)
{
struct gxp_register_mailbox_eventfd_ioctl ibuf;
- struct eventfd_ctx *new_ctx;
+ struct gxp_eventfd *eventfd;
+ int ret = 0;
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
- if (ibuf.virtual_core_id >= client->vd->num_cores)
- return -EINVAL;
+ down_write(&client->semaphore);
- /* Make sure the provided eventfd is valid */
- new_ctx = eventfd_ctx_fdget(ibuf.eventfd);
- if (IS_ERR(new_ctx))
- return PTR_ERR(new_ctx);
+ if (!client->vd) {
+ dev_err(client->gxp->dev,
+ "GXP_REGISTER_MAILBOX_EVENTFD requires the client allocate a VIRTUAL_DEVICE\n");
+ ret = -ENODEV;
+ goto out;
+ }
- down_write(&client->semaphore);
+ if (ibuf.virtual_core_id >= client->vd->num_cores) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Make sure the provided eventfd is valid */
+ eventfd = gxp_eventfd_create(ibuf.eventfd);
+ if (IS_ERR(eventfd)) {
+ ret = PTR_ERR(eventfd);
+ goto out;
+ }
/* Set the new eventfd, replacing any existing one */
if (client->mb_eventfds[ibuf.virtual_core_id])
- eventfd_ctx_put(client->mb_eventfds[ibuf.virtual_core_id]);
+ gxp_eventfd_put(client->mb_eventfds[ibuf.virtual_core_id]);
- client->mb_eventfds[ibuf.virtual_core_id] = new_ctx;
+ client->mb_eventfds[ibuf.virtual_core_id] = eventfd;
+out:
up_write(&client->semaphore);
- return 0;
+ return ret;
}
static int gxp_unregister_mailbox_eventfd(
@@ -1527,23 +1523,34 @@ static int gxp_unregister_mailbox_eventfd(
struct gxp_register_mailbox_eventfd_ioctl __user *argp)
{
struct gxp_register_mailbox_eventfd_ioctl ibuf;
+ int ret = 0;
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
- if (ibuf.virtual_core_id >= client->vd->num_cores)
- return -EINVAL;
-
down_write(&client->semaphore);
+ if (!client->vd) {
+ dev_err(client->gxp->dev,
+ "GXP_UNREGISTER_MAILBOX_EVENTFD requires the client allocate a VIRTUAL_DEVICE\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (ibuf.virtual_core_id >= client->vd->num_cores) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (client->mb_eventfds[ibuf.virtual_core_id])
- eventfd_ctx_put(client->mb_eventfds[ibuf.virtual_core_id]);
+ gxp_eventfd_put(client->mb_eventfds[ibuf.virtual_core_id]);
client->mb_eventfds[ibuf.virtual_core_id] = NULL;
+out:
up_write(&client->semaphore);
- return 0;
+ return ret;
}
static int
@@ -1775,7 +1782,6 @@ static int gxp_platform_probe(struct platform_device *pdev)
goto err;
}
-#ifndef CONFIG_GXP_USE_SW_MAILBOX
for (i = 0; i < GXP_NUM_CORES; i++) {
r = platform_get_resource(pdev, IORESOURCE_MEM, i + 1);
if (IS_ERR_OR_NULL(r)) {
@@ -1828,7 +1834,6 @@ static int gxp_platform_probe(struct platform_device *pdev)
dev_warn(dev, "TPU will not be available for interop\n");
gxp->tpu_dev.mbx_paddr = 0;
}
-#endif // !CONFIG_GXP_USE_SW_MAILBOX
ret = gxp_dma_init(gxp);
if (ret) {
@@ -1842,7 +1847,6 @@ static int gxp_platform_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err_dma_exit;
}
- spin_lock_init(&gxp->mailbox_resps_lock);
#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
ret = gxp_debug_dump_init(gxp, &gxp_sscd_dev, &gxp_sscd_pdata);
@@ -1868,12 +1872,7 @@ static int gxp_platform_probe(struct platform_device *pdev)
goto err_debug_dump_exit;
}
- ret = gxp_dma_map_resources(gxp);
- if (ret) {
- dev_err(dev, "Failed to map resources for GXP cores (ret=%d)\n",
- ret);
- goto err_vd_destroy;
- }
+ gxp_dma_init_default_resources(gxp);
/* Get GSA device from device tree */
np = of_parse_phandle(dev->of_node, "gsa-device", 0);
@@ -1922,9 +1921,7 @@ err_debug_dump_exit:
err_dma_exit:
gxp_dma_exit(gxp);
err_put_tpu_dev:
-#ifndef CONFIG_GXP_USE_SW_MAILBOX
put_device(gxp->tpu_dev.dev);
-#endif
err:
misc_deregister(&gxp->misc_dev);
devm_kfree(dev, (void *)gxp);
@@ -1940,11 +1937,8 @@ static int gxp_platform_remove(struct platform_device *pdev)
gxp_remove_debugfs(gxp);
gxp_fw_data_destroy(gxp);
gxp_vd_destroy(gxp);
- gxp_dma_unmap_resources(gxp);
gxp_dma_exit(gxp);
-#ifndef CONFIG_GXP_USE_SW_MAILBOX
put_device(gxp->tpu_dev.dev);
-#endif
if (gxp->gsa_dev)
put_device(gxp->gsa_dev);
misc_deregister(&gxp->misc_dev);
diff --git a/gxp-pm.c b/gxp-pm.c
index e7d0a6a..29f6c7e 100644
--- a/gxp-pm.c
+++ b/gxp-pm.c
@@ -120,18 +120,43 @@ int gxp_pm_blk_set_rate_acpm(struct gxp_dev *gxp, unsigned long rate)
return ret;
}
-static void set_cmu_mux_state(struct gxp_dev *gxp, u32 val)
+static void set_cmu_noc_user_mux_state(struct gxp_dev *gxp, u32 val)
{
- writel(val << 4, gxp->cmu.vaddr + PLL_CON0_PLL_AUR);
writel(val << 4, gxp->cmu.vaddr + PLL_CON0_NOC_USER);
}
+static void set_cmu_pll_aur_mux_state(struct gxp_dev *gxp, u32 val)
+{
+ writel(val << 4, gxp->cmu.vaddr + PLL_CON0_PLL_AUR);
+}
+
+void gxp_pm_force_cmu_noc_user_mux_normal(struct gxp_dev *gxp)
+{
+ mutex_lock(&gxp->power_mgr->pm_lock);
+ if (gxp->power_mgr->curr_state == AUR_READY)
+ set_cmu_noc_user_mux_state(gxp, AUR_CMU_MUX_NORMAL);
+ gxp->power_mgr->force_noc_mux_normal_count++;
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+}
+
+void gxp_pm_check_cmu_noc_user_mux(struct gxp_dev *gxp)
+{
+ mutex_lock(&gxp->power_mgr->pm_lock);
+ gxp->power_mgr->force_noc_mux_normal_count--;
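+ /* Once the last core finishes starting firmware, drop NOC_USER back to the low-power MUX state */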
+ if (gxp->power_mgr->force_noc_mux_normal_count == 0)
+ if (gxp->power_mgr->curr_state == AUR_READY)
+ set_cmu_noc_user_mux_state(gxp, AUR_CMU_MUX_LOW);
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+}
+
static void gxp_pm_blk_set_state_acpm_async(struct work_struct *work)
{
struct gxp_set_acpm_state_work *set_acpm_state_work =
container_of(work, struct gxp_set_acpm_state_work, work);
mutex_lock(&set_acpm_state_work->gxp->power_mgr->pm_lock);
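+ /* The block may have been powered off while this work was queued; skip the transition if so */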
+ if (set_acpm_state_work->gxp->power_mgr->curr_state == AUR_OFF)
+ goto out;
/*
* This prev_state may be out of date with the manager's current state,
* but we don't need curr_state here. curr_state is the last scheduled
@@ -139,13 +164,20 @@ static void gxp_pm_blk_set_state_acpm_async(struct work_struct *work)
* true because all request are executed synchronously and executed in
* FIFO order.
*/
- if (set_acpm_state_work->prev_state == AUR_READY)
- set_cmu_mux_state(set_acpm_state_work->gxp, AUR_CMU_MUX_NORMAL);
- else if (set_acpm_state_work->state == AUR_READY)
- set_cmu_mux_state(set_acpm_state_work->gxp, AUR_CMU_MUX_LOW);
+ if (set_acpm_state_work->prev_state == AUR_READY) {
+ set_cmu_pll_aur_mux_state(set_acpm_state_work->gxp, AUR_CMU_MUX_NORMAL);
+ set_cmu_noc_user_mux_state(set_acpm_state_work->gxp, AUR_CMU_MUX_NORMAL);
+ } else if (set_acpm_state_work->state == AUR_READY) {
+ set_cmu_pll_aur_mux_state(set_acpm_state_work->gxp, AUR_CMU_MUX_LOW);
+ /* Switch NOC_USER mux to low state only if no core is starting the firmware */
+ if (set_acpm_state_work->gxp->power_mgr->force_noc_mux_normal_count == 0)
+ set_cmu_noc_user_mux_state(set_acpm_state_work->gxp, AUR_CMU_MUX_LOW);
+ }
gxp_pm_blk_set_state_acpm(set_acpm_state_work->gxp,
set_acpm_state_work->state,
set_acpm_state_work->aggressor_vote);
+out:
+ set_acpm_state_work->using = false;
mutex_unlock(&set_acpm_state_work->gxp->power_mgr->pm_lock);
}
@@ -203,12 +235,11 @@ int gxp_pm_blk_off(struct gxp_dev *gxp)
mutex_unlock(&gxp->power_mgr->pm_lock);
return ret;
}
- /*
- * Before the block is off, CMUMUX cannot be low. Otherwise, powering on
- * cores will fail later.
- */
- if (gxp->power_mgr->curr_state == AUR_READY)
- set_cmu_mux_state(gxp, AUR_CMU_MUX_NORMAL);
+ /* Reset MUX frequency from AUR_READY state */
+ if (gxp->power_mgr->curr_state == AUR_READY) {
+ set_cmu_pll_aur_mux_state(gxp, AUR_CMU_MUX_NORMAL);
+ set_cmu_noc_user_mux_state(gxp, AUR_CMU_MUX_NORMAL);
+ }
/* Shutdown TOP's PSM */
gxp_lpm_destroy(gxp);
@@ -279,6 +310,8 @@ static int gxp_pm_req_state_locked(struct gxp_dev *gxp,
enum aur_power_state state,
bool aggressor_vote)
{
+ uint i;
+
if (state > AUR_MAX_ALLOW_STATE) {
dev_err(gxp->dev, "Invalid state %d\n", state);
return -EINVAL;
@@ -289,14 +322,32 @@ static int gxp_pm_req_state_locked(struct gxp_dev *gxp,
dev_warn(gxp->dev,
"It is not supported to request AUR_OFF\n");
} else {
- gxp->power_mgr->set_acpm_state_work.gxp = gxp;
- gxp->power_mgr->set_acpm_state_work.state = state;
- gxp->power_mgr->set_acpm_state_work.aggressor_vote =
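+ /* Find an idle worker entry to carry this power state transition */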
+ for (i = 0; i < AUR_NUM_POWER_STATE_WORKER; i++) {
+ if (!gxp->power_mgr->set_acpm_state_work[i]
+ .using)
+ break;
+ }
+ /* All workers are busy; wait for the workqueue to drain */
+ if (i == AUR_NUM_POWER_STATE_WORKER) {
+ dev_warn(
+ gxp->dev,
+ "The workqueue for power state transition is full");
+ flush_workqueue(gxp->power_mgr->wq);
+ /*
+ * All set_acpm_state_work should be available
+ * now, pick the first one.
+ */
+ i = 0;
+ }
+ gxp->power_mgr->set_acpm_state_work[i].state = state;
+ gxp->power_mgr->set_acpm_state_work[i].aggressor_vote =
aggressor_vote;
- gxp->power_mgr->set_acpm_state_work.prev_state =
+ gxp->power_mgr->set_acpm_state_work[i].prev_state =
gxp->power_mgr->curr_state;
- queue_work(gxp->power_mgr->wq,
- &gxp->power_mgr->set_acpm_state_work.work);
+ gxp->power_mgr->set_acpm_state_work[i].using = true;
+ queue_work(
+ gxp->power_mgr->wq,
+ &gxp->power_mgr->set_acpm_state_work[i].work);
}
gxp->power_mgr->curr_state = state;
gxp->power_mgr->curr_aggressor_vote = aggressor_vote;
@@ -416,28 +467,47 @@ static void gxp_pm_req_pm_qos_async(struct work_struct *work)
container_of(work, struct gxp_req_pm_qos_work, work);
mutex_lock(&req_pm_qos_work->gxp->power_mgr->pm_lock);
- gxp_pm_req_pm_qos(req_pm_qos_work->gxp, req_pm_qos_work->int_val,
- req_pm_qos_work->mif_val);
+ if (req_pm_qos_work->gxp->power_mgr->curr_state != AUR_OFF)
+ gxp_pm_req_pm_qos(req_pm_qos_work->gxp,
+ req_pm_qos_work->int_val,
+ req_pm_qos_work->mif_val);
+ req_pm_qos_work->using = false;
mutex_unlock(&req_pm_qos_work->gxp->power_mgr->pm_lock);
}
static int gxp_pm_req_memory_state_locked(struct gxp_dev *gxp, enum aur_memory_power_state state)
{
s32 int_val = 0, mif_val = 0;
+ uint i;
if (state > AUR_MAX_ALLOW_MEMORY_STATE) {
dev_err(gxp->dev, "Invalid memory state %d\n", state);
return -EINVAL;
}
if (state != gxp->power_mgr->curr_memory_state) {
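+ /* Find an idle worker entry to carry this memory power state request */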
+ for (i = 0; i < AUR_NUM_POWER_STATE_WORKER; i++) {
+ if (!gxp->power_mgr->req_pm_qos_work[i].using)
+ break;
+ }
+ /* All workers are busy; wait for the workqueue to drain */
+ if (i == AUR_NUM_POWER_STATE_WORKER) {
+ dev_warn(
+ gxp->dev,
+ "The workqueue for memory power state transition is full");
+ flush_workqueue(gxp->power_mgr->wq);
+ /*
+ * All req_pm_qos_work should be available
+ * now, pick the first one.
+ */
+ i = 0;
+ }
gxp->power_mgr->curr_memory_state = state;
int_val = aur_memory_state2int_table[state];
mif_val = aur_memory_state2mif_table[state];
- gxp->power_mgr->req_pm_qos_work.gxp = gxp;
- gxp->power_mgr->req_pm_qos_work.int_val = int_val;
- gxp->power_mgr->req_pm_qos_work.mif_val = mif_val;
- queue_work(gxp->power_mgr->wq,
- &gxp->power_mgr->req_pm_qos_work.work);
+ gxp->power_mgr->req_pm_qos_work[i].int_val = int_val;
+ gxp->power_mgr->req_pm_qos_work[i].mif_val = mif_val;
+ gxp->power_mgr->req_pm_qos_work[i].using = true;
+ queue_work(gxp->power_mgr->wq, &gxp->power_mgr->req_pm_qos_work[i].work);
}
return 0;
@@ -542,6 +612,7 @@ int gxp_pm_release_blk_wakelock(struct gxp_dev *gxp)
int gxp_pm_init(struct gxp_dev *gxp)
{
struct gxp_power_manager *mgr;
+ uint i;
mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
if (!mgr)
@@ -554,10 +625,19 @@ int gxp_pm_init(struct gxp_dev *gxp)
refcount_set(&(mgr->blk_wake_ref), 0);
mgr->ops = &gxp_aur_ops;
gxp->power_mgr = mgr;
- INIT_WORK(&mgr->set_acpm_state_work.work, gxp_pm_blk_set_state_acpm_async);
- INIT_WORK(&mgr->req_pm_qos_work.work, gxp_pm_req_pm_qos_async);
+ for (i = 0; i < AUR_NUM_POWER_STATE_WORKER; i++) {
+ mgr->set_acpm_state_work[i].gxp = gxp;
+ mgr->set_acpm_state_work[i].using = false;
+ mgr->req_pm_qos_work[i].gxp = gxp;
+ mgr->req_pm_qos_work[i].using = false;
+ INIT_WORK(&mgr->set_acpm_state_work[i].work,
+ gxp_pm_blk_set_state_acpm_async);
+ INIT_WORK(&mgr->req_pm_qos_work[i].work,
+ gxp_pm_req_pm_qos_async);
+ }
gxp->power_mgr->wq =
create_singlethread_workqueue("gxp_power_work_queue");
+ gxp->power_mgr->force_noc_mux_normal_count = 0;
#if defined(CONFIG_GXP_CLOUDRIPPER) && !defined(CONFIG_GXP_TEST)
pm_runtime_enable(gxp->dev);
diff --git a/gxp-pm.h b/gxp-pm.h
index 111ba7a..647f99a 100644
--- a/gxp-pm.h
+++ b/gxp-pm.h
@@ -59,6 +59,8 @@ enum aur_power_cmu_mux_state {
*/
#define AUR_NON_AGGRESSOR_BIT 24
+#define AUR_NUM_POWER_STATE_WORKER 16
+
struct gxp_pm_device_ops {
int (*pre_blk_powerup)(struct gxp_dev *gxp);
int (*post_blk_powerup)(struct gxp_dev *gxp);
@@ -72,6 +74,7 @@ struct gxp_set_acpm_state_work {
unsigned long state;
unsigned long prev_state;
bool aggressor_vote;
+ bool using;
};
struct gxp_req_pm_qos_work {
@@ -79,6 +82,7 @@ struct gxp_req_pm_qos_work {
struct gxp_dev *gxp;
s32 int_val;
s32 mif_val;
+ bool using;
};
struct gxp_power_manager {
@@ -92,12 +96,13 @@ struct gxp_power_manager {
int curr_memory_state;
refcount_t blk_wake_ref;
struct gxp_pm_device_ops *ops;
- struct gxp_set_acpm_state_work set_acpm_state_work;
- struct gxp_req_pm_qos_work req_pm_qos_work;
+ struct gxp_set_acpm_state_work set_acpm_state_work[AUR_NUM_POWER_STATE_WORKER];
+ struct gxp_req_pm_qos_work req_pm_qos_work[AUR_NUM_POWER_STATE_WORKER];
struct workqueue_struct *wq;
/* INT/MIF requests for memory bandwidth */
struct exynos_pm_qos_request int_min;
struct exynos_pm_qos_request mif_min;
+ int force_noc_mux_normal_count;
};
/**
@@ -264,4 +269,18 @@ int gxp_pm_update_requested_memory_power_state(
struct gxp_dev *gxp, enum aur_memory_power_state origin_state,
enum aur_memory_power_state requested_state);
+/*
+ * gxp_pm_force_cmu_noc_user_mux_normal() - Force the PLL_CON0_NOC_USER MUX into the
+ * normal state. This is required to guarantee LPM works while a core is starting
+ * its firmware.
+ */
+void gxp_pm_force_cmu_noc_user_mux_normal(struct gxp_dev *gxp);
+
+/*
+ * gxp_pm_check_cmu_noc_user_mux() - Check the PLL_CON0_NOC_USER MUX state modified
+ * by gxp_pm_force_cmu_noc_user_mux_normal(). Once no cores are still starting
+ * firmware and the current power state is AUR_READY, the MUX is switched back to
+ * AUR_CMU_MUX_LOW.
+ */
+void gxp_pm_check_cmu_noc_user_mux(struct gxp_dev *gxp);
+
#endif /* __GXP_PM_H__ */
diff --git a/gxp-sw-mailbox-driver.c b/gxp-sw-mailbox-driver.c
deleted file mode 100644
index cd67358..0000000
--- a/gxp-sw-mailbox-driver.c
+++ /dev/null
@@ -1,508 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * GXP kernel-userspace interface definitions.
- *
- * Copyright (C) 2020 Google LLC
- */
-
-#include <linux/bitops.h>
-#include <linux/kthread.h>
-
-#include "gxp-tmp.h"
-#include "gxp-mailbox.h"
-#include "gxp-mailbox-driver.h"
-#include "gxp-mailbox-regs.h"
-
-/* Doorbells for emulating Mailbox Interrupts to device */
-#define MBOX_DOORBELL_INDEX(__core__) (31 - __core__)
-#define MBOX_SET_INT_TO_DEVICE(__mailbox__) \
- writel(ENABLE, __mailbox__->gxp->regs.vaddr + DOORBELL_BLOCK + \
- DOORBELL_BASE(MBOX_DOORBELL_INDEX( \
- __mailbox__->core_id)) + \
- DOORBELL_SET_OFFSET)
-#define MBOX_CLEAR_INT_TO_DEVICE(__mailbox__) \
- writel(ENABLE, __mailbox__->gxp->regs.vaddr + DOORBELL_BLOCK + \
- DOORBELL_BASE(MBOX_DOORBELL_INDEX( \
- __mailbox__->core_id)) + \
- DOORBELL_CLEAR_OFFSET)
-
-/* Emulated Mailbox Register Macros */
-#define MBOX_CSR_SCRATCHPAD_OFFSET 0x100
-
-#define MBOX_ACCESS_SYNC_BARRIER 15 /* Start at the end of the sync barriers */
-
-/* Register Access */
-
-static u32 csr_read(struct gxp_mailbox *mailbox, uint reg_offset)
-{
- u32 read_value;
-
- gxp_acquire_sync_barrier(mailbox->gxp, MBOX_ACCESS_SYNC_BARRIER);
-
- switch (reg_offset) {
- case MBOX_MCUCTLR_OFFSET:
- case MBOX_INTGR0_OFFSET:
- case MBOX_INTMR0_OFFSET:
- case MBOX_INTSR0_OFFSET:
- case MBOX_INTMSR0_OFFSET:
- case MBOX_INTGR1_OFFSET:
- case MBOX_INTMR1_OFFSET:
- case MBOX_INTSR1_OFFSET:
- case MBOX_INTMSR1_OFFSET:
- break;
- case MBOX_INTCR0_OFFSET:
- case MBOX_INTCR1_OFFSET:
- dev_notice(mailbox->gxp->dev,
- "Attempt to read write-only mailbox CSR 0x%X\n",
- reg_offset);
- read_value = 0;
- goto csr_read_exit;
- default:
- dev_notice(mailbox->gxp->dev,
- "Attempt to read non-existent mailbox CSR 0x%X\n",
- reg_offset);
- read_value = 0;
- goto csr_read_exit;
- }
-
- read_value = readl(mailbox->csr_reg_base + reg_offset);
-
-csr_read_exit:
- gxp_release_sync_barrier(mailbox->gxp, MBOX_ACCESS_SYNC_BARRIER);
-
- return read_value;
-}
-
-static void csr_write(struct gxp_mailbox *mailbox, uint reg_offset, u32 value)
-{
- bool send_interrupt = false;
- u32 gen_val, clear_val, mask_val, status_val;
-
- gxp_acquire_sync_barrier(mailbox->gxp, MBOX_ACCESS_SYNC_BARRIER);
-
- /* Emulate any side effects for CSR writes */
- switch (reg_offset) {
- case MBOX_MCUCTLR_OFFSET:
- /* side effects not implemented */
- break;
- case MBOX_INTGR0_OFFSET:
- /*
- * 1. Set interrupt status register
- */
- writel(value, mailbox->csr_reg_base + MBOX_INTSR0_OFFSET);
- /*
- * 2. Check interrupt mask-status and clear registers
- */
- mask_val = readl(mailbox->csr_reg_base + MBOX_INTMSR0_OFFSET);
- clear_val = readl(mailbox->csr_reg_base + MBOX_INTCR0_OFFSET);
-
- if ((value & mask_val) & clear_val) {
- /*
- * 3. Update the clear register to reflect outgoing
- * interrupts
- *
- * A 0-bit in the clear register indicates an interrupt
- * waiting to be serviced, and therefore masked from
- * further generation.
- *
- * Set the bits of any newly-generated sources to 0.
- * The only bits which shold remain set are those
- * already 1 in the clear register and not being set
- * (i.e. 0 in value & mask_val).
- */
- writel(~(value & mask_val) & clear_val,
- mailbox->csr_reg_base + MBOX_INTCR0_OFFSET);
- /*
- * 4. If set interrupts aren't masked, trigger HW
- * interrupt
- */
- send_interrupt = true;
- }
- break;
- case MBOX_INTCR0_OFFSET:
- /*
- * 1. Clear interrupt generation register
- */
- gen_val = readl(mailbox->csr_reg_base + MBOX_INTGR0_OFFSET);
- writel(~value & gen_val,
- mailbox->csr_reg_base + MBOX_INTGR0_OFFSET);
- /*
- * 2. Clear interrupt status register
- */
- status_val = readl(mailbox->csr_reg_base + MBOX_INTSR0_OFFSET);
- writel(~value & status_val,
- mailbox->csr_reg_base + MBOX_INTSR0_OFFSET);
- /*
- * 3. Update the clear register unmask any cleared interrupts
- *
- * A 1 written to any bit should re-enable that interrupt,
- * meaning the new value written should be 1 as well. OR.
- */
- clear_val = readl(mailbox->csr_reg_base + MBOX_INTCR0_OFFSET);
- writel(value | clear_val,
- mailbox->csr_reg_base + MBOX_INTCR0_OFFSET);
- /*
- * 4. Clear outgoing HW interrupt
- */
- MBOX_CLEAR_INT_TO_DEVICE(mailbox);
-
- /*
- * Value written to MBOX_INTCR0_OFFSET is not the actual
- * value stored in memory, so bail here.
- */
- goto csr_write_exit;
- case MBOX_INTMR0_OFFSET:
- /*
- * Update the interrupt mask status register
- * In this register 1 = masked, but in the mask status register
- * 1 = enabled, so the inverse value must be written.
- */
- writel(~value, mailbox->csr_reg_base + MBOX_INTMSR0_OFFSET);
- break;
- case MBOX_INTGR1_OFFSET:
- dev_notice(
- mailbox->gxp->dev,
- "Writing to-host interrupt from host. Is this a mistake?\n");
- /*
- * 1. Set interrupt status register
- */
- writel(value, mailbox->csr_reg_base + MBOX_INTSR1_OFFSET);
- /*
- * 2. Check interrupt mask-status and clear registers
- */
- mask_val = readl(mailbox->csr_reg_base + MBOX_INTMSR1_OFFSET);
- clear_val = readl(mailbox->csr_reg_base + MBOX_INTCR1_OFFSET);
-
- if ((value & mask_val) & clear_val) {
- /*
- * 3. Update the clear register to reflect outgoing
- * interrupts
- *
- * A 0-bit in the clear register indicates an interrupt
- * waiting to be serviced, and therefore masked from
- * further generation.
- *
- * Set the bits of any newly-generated sources to 0.
- * The only bits which shold remain set are those
- * already 1 in the clear register and not being set
- * (i.e. 0 in value & mask_val).
- */
- writel(~(value & mask_val) & clear_val,
- mailbox->csr_reg_base + MBOX_INTCR1_OFFSET);
- /*
- * 4. If set interrupts aren't masked, trigger HW
- * interrupt
- */
- /*
- * Software mailboxes don't have a to-host interrupt,
- * so the host polls the status register and no
- * further action is required.
- */
- }
- break;
- case MBOX_INTCR1_OFFSET:
- /*
- * 1. Clear interrupt generation register
- */
- gen_val = readl(mailbox->csr_reg_base + MBOX_INTGR1_OFFSET);
- writel(~value & gen_val,
- mailbox->csr_reg_base + MBOX_INTGR1_OFFSET);
- /*
- * 2. Clear interrupt status register
- */
- status_val = readl(mailbox->csr_reg_base + MBOX_INTSR1_OFFSET);
- writel(~value & status_val,
- mailbox->csr_reg_base + MBOX_INTSR1_OFFSET);
- /*
- * 3. Update the clear register unmask any cleared interrupts
- *
- * A 1 written to any bit should re-enable that interrupt,
- * meaning the new value written should be 1 as well. OR.
- */
- clear_val = readl(mailbox->csr_reg_base + MBOX_INTCR1_OFFSET);
- writel(value | clear_val,
- mailbox->csr_reg_base + MBOX_INTCR1_OFFSET);
- /*
- * 4. Clear outgoing HW interrupt
- */
- /*
- * Software mailboxes don't have a to-host interrupt, so the
- * host polls the status register and no further action is
- * required.
- */
-
- /*
- * Value written to MBOX_INTCR1_OFFSET is not the actual
- * value stored in memory, so bail here.
- */
- goto csr_write_exit;
- case MBOX_INTMR1_OFFSET:
- /*
- * Update the interrupt mask status register
- * In this register 1 = masked, but in the mask status register
- * 1 = enabled, so the inverse value must be written.
- */
- writel(~value, mailbox->csr_reg_base + MBOX_INTMSR1_OFFSET);
- break;
- case MBOX_INTSR0_OFFSET:
- case MBOX_INTMSR0_OFFSET:
- case MBOX_INTSR1_OFFSET:
- case MBOX_INTMSR1_OFFSET:
- dev_notice(mailbox->gxp->dev,
- "Attempt to write read-only mailbox CSR 0x%X\n",
- reg_offset);
- goto csr_write_exit;
- default:
- dev_notice(mailbox->gxp->dev,
- "Attempt to write non-existent mailbox CSR 0x%X\n",
- reg_offset);
- goto csr_write_exit;
- }
-
- writel(value, mailbox->csr_reg_base + reg_offset);
-
-csr_write_exit:
- if (send_interrupt)
- MBOX_SET_INT_TO_DEVICE(mailbox);
-
- gxp_release_sync_barrier(mailbox->gxp, MBOX_ACCESS_SYNC_BARRIER);
-}
-
-static u32 data_read(struct gxp_mailbox *mailbox, uint reg_offset)
-{
- u32 read_value;
-
- gxp_acquire_sync_barrier(mailbox->gxp, MBOX_ACCESS_SYNC_BARRIER);
-
- read_value = readl(mailbox->data_reg_base + reg_offset);
-
- gxp_release_sync_barrier(mailbox->gxp, MBOX_ACCESS_SYNC_BARRIER);
-
- return read_value;
-}
-
-static void data_write(struct gxp_mailbox *mailbox, uint reg_offset, u32 value)
-{
- gxp_acquire_sync_barrier(mailbox->gxp, MBOX_ACCESS_SYNC_BARRIER);
-
- writel(value, mailbox->data_reg_base + reg_offset);
-
- gxp_release_sync_barrier(mailbox->gxp, MBOX_ACCESS_SYNC_BARRIER);
-}
-
-/* IRQ Handling */
-
-#define MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK BIT(0)
-
-static int poll_int_thread(void *data)
-{
- u32 status_value, mask_value, masked_status_value, next_int;
- struct gxp_mailbox *mailbox = (struct gxp_mailbox *)data;
- struct work_struct **handlers = mailbox->interrupt_handlers;
-
- while (!kthread_should_stop()) {
- mutex_lock(&mailbox->polling_lock);
-
- gxp_acquire_sync_barrier(mailbox->gxp,
- MBOX_ACCESS_SYNC_BARRIER);
- status_value =
- readl(mailbox->csr_reg_base + MBOX_INTSR1_OFFSET);
- mask_value = readl(mailbox->csr_reg_base + MBOX_INTMSR1_OFFSET);
- gxp_release_sync_barrier(mailbox->gxp,
- MBOX_ACCESS_SYNC_BARRIER);
-
- masked_status_value = status_value & mask_value;
-
- if (masked_status_value &
- MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK) {
- mailbox->handle_irq(mailbox);
- masked_status_value &=
- ~MBOX_DEVICE_TO_HOST_RESPONSE_IRQ_MASK;
- }
-
- while ((next_int = ffs(masked_status_value))) {
- next_int--; /* ffs returns 1-based indices */
- masked_status_value &= ~BIT(next_int);
-
- if (handlers[next_int])
- schedule_work(handlers[next_int]);
- else
- pr_err_ratelimited(
- "mailbox%d: received unknown interrupt bit 0x%X\n",
- mailbox->core_id, next_int);
- }
-
- gxp_mailbox_clear_host_interrupt(mailbox,
- status_value & mask_value);
-
- mutex_unlock(&mailbox->polling_lock);
-
- /* TODO(b/177701517): Polling frequency is untuned.*/
- msleep(200);
- }
-
- return 0;
-}
-
-/* gxp-mailbox-driver.h interface */
-
-void gxp_mailbox_driver_init(struct gxp_mailbox *mailbox)
-{
- /* Clear and unmask all to-device interrupts */
- csr_write(mailbox, MBOX_INTCR0_OFFSET, 0xFFFFFFFF);
- csr_write(mailbox, MBOX_INTMR0_OFFSET, 0x00000000);
- /* Clear and unmask all to-host interrupts */
- csr_write(mailbox, MBOX_INTCR1_OFFSET, 0xFFFFFFFF);
- csr_write(mailbox, MBOX_INTMR1_OFFSET, 0x00000000);
-
- /* Setup a polling thread to check for to-host "interrupts" */
- mutex_init(&mailbox->polling_lock);
- mailbox->to_host_poll_task =
- kthread_run(poll_int_thread, mailbox,
- "gxp_poll_mailbox%d_to_host", mailbox->core_id);
-
- if (IS_ERR(mailbox->to_host_poll_task)) {
- dev_err(mailbox->gxp->dev,
- "Failed to start polling for incoming updates from mailbox %d\n",
- mailbox->core_id);
- }
-}
-
-void gxp_mailbox_driver_exit(struct gxp_mailbox *mailbox)
-{
- if (!IS_ERR_OR_NULL(mailbox->to_host_poll_task)) {
- mutex_lock(&mailbox->polling_lock);
- kthread_stop(mailbox->to_host_poll_task);
- mutex_unlock(&mailbox->polling_lock);
- }
-}
-
-void __iomem *gxp_mailbox_get_csr_base(struct gxp_dev *gxp, uint index)
-{
- return gxp->fwbufs[index].vaddr + AURORA_SCRATCHPAD_OFF +
- MBOX_CSR_SCRATCHPAD_OFFSET;
-}
-
-void __iomem *gxp_mailbox_get_data_base(struct gxp_dev *gxp, uint index)
-{
- return gxp->fwbufs[index].vaddr + AURORA_SCRATCHPAD_OFF +
- MBOX_CSR_SCRATCHPAD_OFFSET + MBOX_DATA_REG_BASE;
-}
-
-/* gxp-mailbox-driver.h: CSR-based calls */
-
-void gxp_mailbox_reset_hw(struct gxp_mailbox *mailbox)
-{
- csr_write(mailbox, MBOX_MCUCTLR_OFFSET, 1);
-}
-
-void gxp_mailbox_generate_device_interrupt(struct gxp_mailbox *mailbox,
- u32 int_mask)
-{
- csr_write(mailbox, MBOX_INTGR0_OFFSET, int_mask);
-}
-
-u32 gxp_mailbox_get_device_mask_status(struct gxp_mailbox *mailbox)
-{
- return csr_read(mailbox, MBOX_INTMSR0_OFFSET);
-}
-
-void gxp_mailbox_clear_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
-{
- csr_write(mailbox, MBOX_INTCR1_OFFSET, int_mask);
-}
-
-void gxp_mailbox_mask_host_interrupt(struct gxp_mailbox *mailbox, u32 int_mask)
-{
- csr_write(mailbox, MBOX_INTMR1_OFFSET, int_mask);
-}
-
-u32 gxp_mailbox_get_host_mask_status(struct gxp_mailbox *mailbox)
-{
- return csr_read(mailbox, MBOX_INTMSR1_OFFSET);
-}
-
-/* gxp-mailbox-driver.h: Data register-based calls */
-
-void gxp_mailbox_write_status(struct gxp_mailbox *mailbox, u32 status)
-{
- data_write(mailbox, MBOX_STATUS_OFFSET, status);
-}
-
-void gxp_mailbox_write_descriptor(struct gxp_mailbox *mailbox,
- dma_addr_t descriptor_addr)
-{
- data_write(mailbox, MBOX_DESCRIPTOR_ADDR_OFFSET, (u32)descriptor_addr);
-}
-
-void gxp_mailbox_write_cmd_queue_tail(struct gxp_mailbox *mailbox, u16 val)
-{
- u32 current_resp_head =
- data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET) &
- RESP_HEAD_MASK;
- u32 new_cmd_tail = (u32)val << CMD_TAIL_SHIFT;
-
- data_write(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET,
- new_cmd_tail | current_resp_head);
-}
-
-void gxp_mailbox_write_resp_queue_head(struct gxp_mailbox *mailbox, u16 val)
-{
- u32 current_cmd_tail =
- data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET) &
- CMD_TAIL_MASK;
- u32 new_resp_head = (u32)val << RESP_HEAD_SHIFT;
-
- data_write(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET,
- current_cmd_tail | new_resp_head);
-}
-
-u16 gxp_mailbox_read_cmd_queue_head(struct gxp_mailbox *mailbox)
-{
- u32 reg_val = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET);
-
- return (u16)((reg_val & CMD_HEAD_MASK) >> CMD_HEAD_SHIFT);
-}
-
-u16 gxp_mailbox_read_resp_queue_tail(struct gxp_mailbox *mailbox)
-{
- u32 reg_val = data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET);
-
- return (u16)((reg_val & RESP_TAIL_MASK) >> RESP_TAIL_SHIFT);
-}
-
-void gxp_mailbox_write_cmd_queue_head(struct gxp_mailbox *mailbox, u16 val)
-{
- u32 current_resp_tail =
- data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET) &
- RESP_TAIL_MASK;
- u32 new_cmd_head = (u32)val << CMD_HEAD_SHIFT;
-
- data_write(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET,
- new_cmd_head | current_resp_tail);
-}
-
-void gxp_mailbox_write_resp_queue_tail(struct gxp_mailbox *mailbox, u16 val)
-{
- u32 current_cmd_head =
- data_read(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET) &
- CMD_HEAD_MASK;
- u32 new_resp_tail = (u32)val << RESP_TAIL_SHIFT;
-
- data_write(mailbox, MBOX_CMD_HEAD_RESP_TAIL_OFFSET,
- current_cmd_head | new_resp_tail);
-}
-
-u16 gxp_mailbox_read_cmd_queue_tail(struct gxp_mailbox *mailbox)
-{
- u32 reg_val = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET);
-
- return (u16)((reg_val & CMD_TAIL_MASK) >> CMD_TAIL_SHIFT);
-}
-
-u16 gxp_mailbox_read_resp_queue_head(struct gxp_mailbox *mailbox)
-{
- u32 reg_val = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET);
-
- return (u16)((reg_val & RESP_HEAD_MASK) >> RESP_HEAD_SHIFT);
-}
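The queue-pointer helpers above all share one read-modify-write pattern on a
data register that packs two fields (for example CMD_TAIL and RESP_HEAD). A
condensed sketch, with `new_cmd_tail` as a stand-in value:

	u32 reg = data_read(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET);

	reg &= RESP_HEAD_MASK;				/* keep the other field */
	reg |= (u32)new_cmd_tail << CMD_TAIL_SHIFT;	/* update this field */
	data_write(mailbox, MBOX_CMD_TAIL_RESP_HEAD_OFFSET, reg);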
diff --git a/gxp-telemetry.c b/gxp-telemetry.c
index 0afc6de..18533db 100644
--- a/gxp-telemetry.c
+++ b/gxp-telemetry.c
@@ -15,6 +15,7 @@
#include "gxp-host-device-structs.h"
#include "gxp-notification.h"
#include "gxp-telemetry.h"
+#include "gxp-vd.h"
static inline bool is_core_telemetry_enabled(struct gxp_dev *gxp, uint core,
u8 type)
@@ -109,6 +110,8 @@ static void gxp_telemetry_vma_close(struct vm_area_struct *vma)
struct buffer_data *data;
u8 type;
int i;
+ uint virt_core;
+ struct gxp_virtual_device *vd;
gxp = ((struct telemetry_vma_data *)vma->vm_private_data)->gxp;
data = ((struct telemetry_vma_data *)vma->vm_private_data)->data;
@@ -116,14 +119,26 @@ static void gxp_telemetry_vma_close(struct vm_area_struct *vma)
mutex_lock(&gxp->telemetry_mgr->lock);
+ down_read(&gxp->vd_semaphore);
if (refcount_dec_and_test(&data->ref_count)) {
if (data->host_status & GXP_TELEMETRY_HOST_STATUS_ENABLED)
telemetry_disable_locked(gxp, type);
- for (i = 0; i < GXP_NUM_CORES; i++)
- gxp_dma_free_coherent(gxp, BIT(i), data->size,
- data->buffers[i],
- data->buffer_daddrs[i]);
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ vd = gxp->core_to_vd[i];
+ if (vd != NULL) {
+ virt_core =
+ gxp_vd_phys_core_to_virt_core(vd, i);
+ gxp_dma_free_coherent(gxp, vd, BIT(virt_core),
+ data->size,
+ data->buffers[i],
+ data->buffer_daddrs[i]);
+ } else {
+ gxp_dma_free_coherent(gxp, NULL, 0, data->size,
+ data->buffers[i],
+ data->buffer_daddrs[i]);
+ }
+ }
switch (type) {
case GXP_TELEMETRY_TYPE_LOGGING:
gxp->telemetry_mgr->logging_buff_data = NULL;
@@ -138,6 +153,7 @@ static void gxp_telemetry_vma_close(struct vm_area_struct *vma)
kfree(data);
kfree(vma->vm_private_data);
}
+ up_read(&gxp->vd_semaphore);
mutex_unlock(&gxp->telemetry_mgr->lock);
}
@@ -186,6 +202,7 @@ static int check_telemetry_type_availability(struct gxp_dev *gxp, u8 type)
* @size: The size of buffer to allocate for each core
*
* Caller must hold the telemetry_manager's lock.
+ * Caller must hold gxp->vd_semaphore for reading.
*
* Return: A pointer to the `struct buffer_data` if successful, NULL otherwise
*/
@@ -194,6 +211,8 @@ static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
{
struct buffer_data *data;
int i;
+ uint virt_core;
+ struct gxp_virtual_device *vd;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
@@ -201,10 +220,22 @@ static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
/* Allocate cache-coherent buffers for logging/tracing to */
for (i = 0; i < GXP_NUM_CORES; i++) {
- data->buffers[i] =
- gxp_dma_alloc_coherent(gxp, BIT(i), size,
- &data->buffer_daddrs[i],
- GFP_KERNEL, 0);
+ /*
+ * If the core is not allocated, we cannot map the buffer on
+ * that core.
+ */
+ vd = gxp->core_to_vd[i];
+ if (vd != NULL) {
+ virt_core = gxp_vd_phys_core_to_virt_core(vd, i);
+ data->buffers[i] = gxp_dma_alloc_coherent(
+ gxp, vd, BIT(virt_core), size,
+ &data->buffer_daddrs[i], GFP_KERNEL, 0);
+ } else {
+ data->buffers[i] =
+ gxp_dma_alloc_coherent(gxp, NULL, 0, size,
+ &data->buffer_daddrs[i],
+ GFP_KERNEL, 0);
+ }
if (!data->buffers[i])
goto err_alloc;
}
@@ -215,9 +246,17 @@ static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
err_alloc:
for (; i > 0; i--) {
- gxp_dma_free_coherent(gxp, BIT(i - 1), size,
- data->buffers[i - 1],
- data->buffer_daddrs[i - 1]);
+ vd = gxp->core_to_vd[i - 1];
+ if (vd != NULL) {
+ virt_core = gxp_vd_phys_core_to_virt_core(vd, i - 1);
+ gxp_dma_free_coherent(gxp, vd, BIT(virt_core), size,
+ data->buffers[i - 1],
+ data->buffer_daddrs[i - 1]);
+ } else {
+ gxp_dma_free_coherent(gxp, NULL, 0, size,
+ data->buffers[i - 1],
+ data->buffer_daddrs[i - 1]);
+ }
}
kfree(data);
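For reference, the locking that allocate_telemetry_buffers() now documents
matches what gxp_telemetry_mmap_buffers() does in the hunks below; a condensed
sketch of the expected caller (allocation call elided):

	mutex_lock(&gxp->telemetry_mgr->lock);
	down_read(&gxp->vd_semaphore);
	/* ... allocate_telemetry_buffers() and related bookkeeping ... */
	up_read(&gxp->vd_semaphore);
	mutex_unlock(&gxp->telemetry_mgr->lock);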
@@ -294,6 +333,8 @@ int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
size_t size = total_size / GXP_NUM_CORES;
struct buffer_data *data;
int i;
+ struct gxp_virtual_device *vd;
+ uint virt_core;
if (!gxp->telemetry_mgr)
return -ENODEV;
@@ -303,6 +344,7 @@ int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
return -EINVAL;
mutex_lock(&gxp->telemetry_mgr->lock);
+ down_read(&gxp->vd_semaphore);
ret = check_telemetry_type_availability(gxp, type);
if (ret)
@@ -335,20 +377,32 @@ int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
else /* type == GXP_TELEMETRY_TYPE_TRACING */
gxp->telemetry_mgr->tracing_buff_data = data;
+ up_read(&gxp->vd_semaphore);
mutex_unlock(&gxp->telemetry_mgr->lock);
return 0;
err_free_buffers:
- for (i = 0; i < GXP_NUM_CORES; i++)
- gxp_dma_free_coherent(gxp, BIT(i), data->size, data->buffers[i],
- data->buffer_daddrs[i]);
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ vd = gxp->core_to_vd[i];
+ if (vd != NULL) {
+ virt_core = gxp_vd_phys_core_to_virt_core(vd, i);
+ gxp_dma_free_coherent(gxp, vd, BIT(virt_core),
+ data->size, data->buffers[i],
+ data->buffer_daddrs[i]);
+ } else {
+ gxp_dma_free_coherent(gxp, NULL, 0, data->size,
+ data->buffers[i],
+ data->buffer_daddrs[i]);
+ }
+ }
kfree(data);
err_free_vma_data:
kfree(vma_data);
err:
+ up_read(&gxp->vd_semaphore);
mutex_unlock(&gxp->telemetry_mgr->lock);
return ret;
}
diff --git a/gxp-tmp.h b/gxp-tmp.h
deleted file mode 100644
index b813867..0000000
--- a/gxp-tmp.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Temporary configuration file for GXP.
- *
- * Copyright (C) 2021 Google LLC
- */
-#ifndef __GXP_TMP_H__
-#define __GXP_TMP_H__
-
-/* TODO (b/176979630): Delete gxp-tmp.h. Move definitions to gxp-config.h */
-
-#if !IS_ENABLED(CONFIG_GXP_TEST)
-
-#define AURORA_SCRATCHPAD_OFF 0x00F00000 /* Last 1M of ELF load region */
-#define AURORA_SCRATCHPAD_LEN 0x00100000 /* 1M */
-
-#else /* CONFIG_GXP_TEST */
-/* Firmware memory is shrunk in unit tests. */
-#define AURORA_SCRATCHPAD_OFF 0x000F0000
-#define AURORA_SCRATCHPAD_LEN 0x00010000
-
-#endif /* CONFIG_GXP_TEST */
-
-#define Q7_ALIVE_MAGIC 0x55555555
-
-#define LPM_BLOCK 0x040000
-#define DOORBELL_BLOCK 0x0C0000
-#define SYNC_BARRIER_BLOCK 0x00100000
-
-#define DOORBELL_BASE(_x_) ((_x_) << 12)
-#define DOORBELL_COUNT 32
-#define DOORBELL_STATUS_OFFSET 0x0
-#define DOORBELL_SET_OFFSET 0x4
-#define DOORBELL_CLEAR_OFFSET 0x8
-#define DOORBELL_EN_ALL_MASK 0xFFFFFFFF
-
-#define SYNC_BARRIER_BASE(_x_) ((_x_) << 12)
-#define SYNC_BARRIER_FREE_VALUE 0xF
-#define SYNC_BARRIER_COUNT 16
-
-#define CORE_PSM_BASE(_core_) ((_core_ + 1) << 12)
-#define TOP_PSM_BASE 0x5000
-
-#define PSM_INIT_DONE_MASK 0x80
-#define PSM_CURR_STATE_MASK 0x0F
-#define PSM_STATE_VALID_MASK 0x10
-
-#define PSM_HW_MODE 0x0
-#define PSM_START 0x1
-
-#define PSM_STATE_ACTIVE 0x0
-#define PSM_STATE_CLK_GATED 0x1
-
-#define PROVINO_IXBAR1_ARL_CTRL 0x1818
-#define PROVINO_IXBAR1_ARL_EN (0x1 << 31)
-
-#define DISABLE 0x0
-#define ENABLE 0x1
-
-#define CORE_SCRATCHPAD_BASE(_core_) (_core_ << 16)
-#define SCRATCHPAD_MSG_OFFSET(_msg_) (_msg_ << 2)
-
-enum aurora_msg {
- MSG_CORE_ALIVE,
- MSG_TOP_ACCESS_OK,
- MSG_SCRATCHPAD_MAX,
-};
-
-#endif /* __GXP_TMP_H__ */
diff --git a/gxp-vd.c b/gxp-vd.c
index f632a52..9c5c805 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -6,13 +6,14 @@
*/
#include <linux/bitops.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/types.h>
+#include "gxp-dma.h"
#include "gxp-firmware.h"
#include "gxp-firmware-data.h"
#include "gxp-internal.h"
+#include "gxp-mailbox.h"
+#include "gxp-telemetry.h"
#include "gxp-vd.h"
int gxp_vd_init(struct gxp_dev *gxp)
@@ -40,9 +41,12 @@ void gxp_vd_destroy(struct gxp_dev *gxp)
up_write(&gxp->vd_semaphore);
}
-struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp, u16 requested_cores)
+struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp,
+ u16 requested_cores)
{
struct gxp_virtual_device *vd;
+ int i;
+ int err = 0;
/* Assumes 0 < requested_cores <= GXP_NUM_CORES */
if (requested_cores == 0 || requested_cores > GXP_NUM_CORES)
@@ -55,24 +59,114 @@ struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp, u16 requested_co
vd->gxp = gxp;
vd->num_cores = requested_cores;
- /*
- * TODO(b/209083969) Initialize VD aux domain here to support VD
- * suspend/resume and mapping without a VIRTUAL_DEVICE wakelock.
- */
+ vd->core_domains =
+ kcalloc(requested_cores, sizeof(*vd->core_domains), GFP_KERNEL);
+ if (!vd->core_domains) {
+ err = -ENOMEM;
+ goto error_free_vd;
+ }
+ for (i = 0; i < requested_cores; i++) {
+ vd->core_domains[i] = iommu_domain_alloc(gxp->dev->bus);
+ if (!vd->core_domains[i])
+ goto error_free_domains;
+ }
+
+ vd->mailbox_resp_queues = kcalloc(
+ vd->num_cores, sizeof(*vd->mailbox_resp_queues), GFP_KERNEL);
+ if (!vd->mailbox_resp_queues) {
+ err = -ENOMEM;
+ goto error_free_domains;
+ }
+
+ for (i = 0; i < vd->num_cores; i++) {
+ INIT_LIST_HEAD(&vd->mailbox_resp_queues[i].queue);
+ spin_lock_init(&vd->mailbox_resp_queues[i].lock);
+ init_waitqueue_head(&vd->mailbox_resp_queues[i].waitq);
+ }
return vd;
+
+error_free_domains:
+ for (i -= 1; i >= 0; i--)
+ iommu_domain_free(vd->core_domains[i]);
+ kfree(vd->core_domains);
+error_free_vd:
+ kfree(vd);
+
+ return err ? ERR_PTR(err) : NULL;
}
void gxp_vd_release(struct gxp_virtual_device *vd)
{
- /*
- * TODO(b/209083969) Cleanup VD aux domain once it's created in
- * gxp_vd_allocate().
- */
+ struct gxp_async_response *cur, *nxt;
+ int i;
+ unsigned long flags;
+ /* Cleanup any unconsumed responses */
+ for (i = 0; i < vd->num_cores; i++) {
+ /*
+ * Since the VD is being released, locking is not strictly necessary here,
+ * but do it anyway for consistency.
+ */
+ spin_lock_irqsave(&vd->mailbox_resp_queues[i].lock, flags);
+ list_for_each_entry_safe(cur, nxt,
+ &vd->mailbox_resp_queues[i].queue,
+ list_entry) {
+ list_del(&cur->list_entry);
+ kfree(cur);
+ }
+ spin_unlock_irqrestore(&vd->mailbox_resp_queues[i].lock, flags);
+ }
+
+ for (i = 0; i < vd->num_cores; i++)
+ iommu_domain_free(vd->core_domains[i]);
+ kfree(vd->core_domains);
+ kfree(vd->mailbox_resp_queues);
kfree(vd);
}
+static void map_telemetry_buffers(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd, uint virt_core,
+ uint core)
+{
+ if (gxp->telemetry_mgr->logging_buff_data)
+ gxp_dma_map_allocated_coherent_buffer(
+ gxp,
+ gxp->telemetry_mgr->logging_buff_data->buffers[core],
+ vd, BIT(virt_core),
+ gxp->telemetry_mgr->logging_buff_data->size,
+ gxp->telemetry_mgr->logging_buff_data
+ ->buffer_daddrs[core],
+ 0);
+ if (gxp->telemetry_mgr->tracing_buff_data)
+ gxp_dma_map_allocated_coherent_buffer(
+ gxp,
+ gxp->telemetry_mgr->tracing_buff_data->buffers[core],
+ vd, BIT(virt_core),
+ gxp->telemetry_mgr->tracing_buff_data->size,
+ gxp->telemetry_mgr->tracing_buff_data
+ ->buffer_daddrs[core],
+ 0);
+}
+
+static void unmap_telemetry_buffers(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core, uint core)
+{
+ if (gxp->telemetry_mgr->logging_buff_data)
+ gxp_dma_unmap_allocated_coherent_buffer(
+ gxp, vd, BIT(virt_core),
+ gxp->telemetry_mgr->logging_buff_data->size,
+ gxp->telemetry_mgr->logging_buff_data
+ ->buffer_daddrs[core]);
+ if (gxp->telemetry_mgr->tracing_buff_data)
+ gxp_dma_unmap_allocated_coherent_buffer(
+ gxp, vd, BIT(virt_core),
+ gxp->telemetry_mgr->tracing_buff_data->size,
+ gxp->telemetry_mgr->tracing_buff_data
+ ->buffer_daddrs[core]);
+}
+
/* Caller must hold gxp->vd_semaphore for writing */
int gxp_vd_start(struct gxp_virtual_device *vd)
{
@@ -81,6 +175,7 @@ int gxp_vd_start(struct gxp_virtual_device *vd)
uint available_cores = 0;
uint cores_remaining = vd->num_cores;
uint core_list = 0;
+ uint virt_core = 0;
int ret = 0;
for (core = 0; core < GXP_NUM_CORES; core++) {
@@ -104,14 +199,30 @@ int gxp_vd_start(struct gxp_virtual_device *vd)
break;
if (core_list & BIT(core)) {
- ret = gxp_firmware_run(gxp, core);
+ gxp->core_to_vd[core] = vd;
+ cores_remaining--;
+ gxp_dma_domain_attach_device(gxp, vd, virt_core, core);
+ gxp_dma_map_core_resources(gxp, vd, virt_core, core);
+ map_telemetry_buffers(gxp, vd, virt_core, core);
+ ret = gxp_firmware_run(gxp, vd, virt_core, core);
if (ret) {
dev_err(gxp->dev, "Failed to run firmware on core %u\n",
core);
+ /*
+ * out_vd_stop will only clean up the cores that
+ * had their firmware start successfully, so we
+ * need to clean up `core` here.
+ */
+ unmap_telemetry_buffers(gxp, vd, virt_core,
+ core);
+ gxp_dma_unmap_core_resources(gxp, vd, virt_core,
+ core);
+ gxp_dma_domain_detach_device(gxp, vd,
+ virt_core);
+ gxp->core_to_vd[core] = NULL;
goto out_vd_stop;
}
- gxp->core_to_vd[core] = vd;
- cores_remaining--;
+ virt_core++;
}
}
@@ -141,6 +252,7 @@ void gxp_vd_stop(struct gxp_virtual_device *vd)
{
struct gxp_dev *gxp = vd->gxp;
uint core;
+ uint virt_core = 0;
/*
* Put all cores in the VD into reset so they can not wake each other up
@@ -155,8 +267,12 @@ void gxp_vd_stop(struct gxp_virtual_device *vd)
for (core = 0; core < GXP_NUM_CORES; core++) {
if (gxp->core_to_vd[core] == vd) {
+ gxp_firmware_stop(gxp, vd, virt_core, core);
+ unmap_telemetry_buffers(gxp, vd, virt_core, core);
+ gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
+ gxp_dma_domain_detach_device(gxp, vd, virt_core);
gxp->core_to_vd[core] = NULL;
- gxp_firmware_stop(gxp, core);
+ virt_core++;
}
}
@@ -166,14 +282,8 @@ void gxp_vd_stop(struct gxp_virtual_device *vd)
}
}
-/*
- * Helper function for use in both `gxp_vd_virt_core_to_phys_core()` and
- * `gxp_vd_virt_core_list_to_phys_core_list()`.
- *
- * Caller must have locked `gxp->vd_semaphore` for reading.
- */
-static int virt_core_to_phys_core_locked(struct gxp_virtual_device *vd,
- u16 virt_core)
+/* Caller must have locked `gxp->vd_semaphore` for reading */
+int gxp_vd_virt_core_to_phys_core(struct gxp_virtual_device *vd, u16 virt_core)
{
struct gxp_dev *gxp = vd->gxp;
uint phys_core;
@@ -194,28 +304,14 @@ static int virt_core_to_phys_core_locked(struct gxp_virtual_device *vd,
return -EINVAL;
}
-int gxp_vd_virt_core_to_phys_core(struct gxp_virtual_device *vd, u16 virt_core)
-{
- struct gxp_dev *gxp = vd->gxp;
- int ret;
-
- down_read(&gxp->vd_semaphore);
- ret = virt_core_to_phys_core_locked(vd, virt_core);
- up_read(&gxp->vd_semaphore);
-
- return ret;
-}
-
+/* Caller must have locked `gxp->vd_semaphore` for reading */
uint gxp_vd_virt_core_list_to_phys_core_list(struct gxp_virtual_device *vd,
u16 virt_core_list)
{
- struct gxp_dev *gxp = vd->gxp;
uint phys_core_list = 0;
uint virt_core = 0;
int phys_core;
- down_read(&gxp->vd_semaphore);
-
while (virt_core_list) {
/*
* Get the next virt core by finding the index of the first
@@ -228,30 +324,25 @@ uint gxp_vd_virt_core_list_to_phys_core_list(struct gxp_virtual_device *vd,
virt_core = ffs(virt_core_list) - 1;
/* Any invalid virt cores invalidate the whole list */
- phys_core = virt_core_to_phys_core_locked(vd, virt_core);
- if (phys_core < 0) {
- phys_core_list = 0;
- goto out;
- }
+ phys_core = gxp_vd_virt_core_to_phys_core(vd, virt_core);
+ if (phys_core < 0)
+ return 0;
phys_core_list |= BIT(phys_core);
virt_core_list &= ~BIT(virt_core);
}
-out:
- up_read(&gxp->vd_semaphore);
-
return phys_core_list;
}
-int gxp_vd_phys_core_to_virt_core(struct gxp_virtual_device *vd, u16 phys_core)
+/* Caller must have locked `gxp->vd_semaphore` for reading */
+int gxp_vd_phys_core_to_virt_core(struct gxp_virtual_device *vd,
+ u16 phys_core)
{
struct gxp_dev *gxp = vd->gxp;
int virt_core = 0;
uint core;
- down_read(&gxp->vd_semaphore);
-
if (gxp->core_to_vd[phys_core] != vd) {
virt_core = -EINVAL;
goto out;
@@ -265,9 +356,6 @@ int gxp_vd_phys_core_to_virt_core(struct gxp_virtual_device *vd, u16 phys_core)
if (gxp->core_to_vd[core] == vd)
virt_core++;
}
-
out:
- up_read(&gxp->vd_semaphore);
-
return virt_core;
}
diff --git a/gxp-vd.h b/gxp-vd.h
index c0a3f49..2193d77 100644
--- a/gxp-vd.h
+++ b/gxp-vd.h
@@ -7,14 +7,29 @@
#ifndef __GXP_VD_H__
#define __GXP_VD_H__
+#include <linux/iommu.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
+#include <linux/wait.h>
#include "gxp-internal.h"
+struct mailbox_resp_queue {
+ /* Queue of `struct gxp_async_response`s */
+ struct list_head queue;
+ /* Lock protecting access to the `queue` */
+ spinlock_t lock;
+ /* Waitqueue to wait on if the queue is empty */
+ wait_queue_head_t waitq;
+};
+
struct gxp_virtual_device {
struct gxp_dev *gxp;
uint num_cores;
void *fw_app;
+ struct iommu_domain **core_domains;
+ struct mailbox_resp_queue *mailbox_resp_queues;
};
/*
@@ -75,7 +90,10 @@ void gxp_vd_stop(struct gxp_virtual_device *vd);
/*
* Returns the physical core ID for the specified virtual_core belonging to
- * this virtual device.
+ * this virtual device, or -EINVAL if the virtual core is not running on a
+ * physical core.
+ *
+ * The caller must have locked gxp->vd_semaphore for reading.
*/
int gxp_vd_virt_core_to_phys_core(struct gxp_virtual_device *vd, u16 virt_core);
@@ -84,10 +102,18 @@ int gxp_vd_virt_core_to_phys_core(struct gxp_virtual_device *vd, u16 virt_core);
*
* If the virtual list contains any invalid IDs, the entire physical ID list
* will be considered invalid and this function will return 0.
+ *
+ * The caller must have locked gxp->vd_semaphore for reading.
*/
uint gxp_vd_virt_core_list_to_phys_core_list(struct gxp_virtual_device *vd,
u16 virt_core_list);
+/*
+ * Returns the virtual core number assigned to phys_core within this virtual
+ * device, or -EINVAL if the core is not part of this virtual device.
+ *
+ * The caller must have locked gxp->vd_semaphore for reading.
+ */
int gxp_vd_phys_core_to_virt_core(struct gxp_virtual_device *vd, u16 phys_core);
#endif /* __GXP_VD_H__ */
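Since all three translation helpers now rely on the caller to hold the
semaphore, a typical call site looks like the following sketch, assuming
`gxp`, `vd`, and `virt_core` are already valid:

	int phys_core;

	down_read(&gxp->vd_semaphore);
	phys_core = gxp_vd_virt_core_to_phys_core(vd, virt_core);
	if (phys_core >= 0) {
		/* ... use phys_core while the semaphore is held ... */
	}
	up_read(&gxp->vd_semaphore);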
diff --git a/gxp-wakelock.c b/gxp-wakelock.c
index feb5c88..9344f21 100644
--- a/gxp-wakelock.c
+++ b/gxp-wakelock.c
@@ -50,14 +50,6 @@ int gxp_wakelock_acquire(struct gxp_dev *gxp)
ret, mgr->count);
goto err_blk_on;
}
-
- ret = gxp_dma_ssmt_program(gxp);
- if (ret) {
- dev_err(gxp->dev,
- "Failed to program SSMTs after powering on BLK_AUR (ret=%d)\n",
- ret);
- goto err_ssmt_program;
- }
}
out:
@@ -65,8 +57,6 @@ out:
return ret;
-err_ssmt_program:
- gxp_pm_blk_off(gxp);
err_blk_on:
mgr->count--;
mutex_unlock(&mgr->lock);
diff --git a/gxp.h b/gxp.h
index 971b1f1..9f08925 100644
--- a/gxp.h
+++ b/gxp.h
@@ -24,19 +24,6 @@
#define GXP_MMAP_LOG_BUFFER_OFFSET 0x10000
#define GXP_MMAP_TRACE_BUFFER_OFFSET 0x20000
-/*
- * TODO(b/209083969) The following IOCTLs will no longer require the caller
- * to hold a virtual device wakelock to call them once virtual device
- * suspend/resume is implemented:
- * - GXP_MAP_BUFFER
- * - GXP_UNMAP_BUFFER
- * - GXP_SYNC_BUFFER
- * - GXP_MAP_DMABUF
- * - GXP_UNMAP_DMABUF
- * - GXP_MAP_TPU_MBX_QUEUE
- * - GXP_UNMAP_TPU_MBX_QUEUE
- */
-
#define GXP_IOCTL_BASE 0xEE
#define GXP_INTERFACE_VERSION_BUILD_BUFFER_SIZE 64
@@ -324,7 +311,7 @@ struct gxp_map_ioctl {
/*
* Map host buffer.
*
- * The client must hold a VIRTUAL_DEVICE wakelock.
+ * The client must have allocated a virtual device.
*/
#define GXP_MAP_BUFFER \
_IOWR(GXP_IOCTL_BASE, 0, struct gxp_map_ioctl)
@@ -336,7 +323,7 @@ struct gxp_map_ioctl {
* from the kernel's internal records. It is recommended to use the argument
* that was passed in GXP_MAP_BUFFER to un-map the buffer.
*
- * The client must hold a VIRTUAL_DEVICE wakelock.
+ * The client must have allocated a virtual device.
*/
#define GXP_UNMAP_BUFFER \
_IOW(GXP_IOCTL_BASE, 1, struct gxp_map_ioctl)
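From userspace the map/unmap pair is used roughly as sketched below; the
struct gxp_map_ioctl field names are assumptions based on this header's
conventions, not taken from the full definition:

	struct gxp_map_ioctl map = {
		.host_address = (__u64)(uintptr_t)buffer,	/* assumed field name */
		.size = buffer_size,				/* assumed field name */
	};

	if (ioctl(gxp_fd, GXP_MAP_BUFFER, &map) == 0) {
		/* ... use the returned device address in DSP commands ... */
		ioctl(gxp_fd, GXP_UNMAP_BUFFER, &map);
	}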
@@ -374,7 +361,7 @@ struct gxp_sync_ioctl {
/*
* Sync buffer previously mapped by GXP_MAP_BUFFER.
*
- * The client must hold a VIRTUAL_DEVICE wakelock.
+ * The client must have allocated a virtual device.
*
* EINVAL: If a mapping for @device_address is not found.
* EINVAL: If @size equals 0.
@@ -420,7 +407,7 @@ struct gxp_map_dmabuf_ioctl {
/*
* Map host buffer via its dma-buf FD.
*
- * The client must hold a VIRTUAL_DEVICE wakelock.
+ * The client must have allocated a virtual device.
*/
#define GXP_MAP_DMABUF _IOWR(GXP_IOCTL_BASE, 20, struct gxp_map_dmabuf_ioctl)
@@ -431,7 +418,7 @@ struct gxp_map_dmabuf_ioctl {
* kernel's internal records. It is recommended to use the argument that was
* passed in GXP_MAP_DMABUF to un-map the dma-buf.
*
- * The client must hold a VIRTUAL_DEVICE wakelock.
+ * The client must have allocated a virtual device.
*/
#define GXP_UNMAP_DMABUF _IOW(GXP_IOCTL_BASE, 21, struct gxp_map_dmabuf_ioctl)
@@ -804,7 +791,7 @@ struct gxp_tpu_mbx_queue_ioctl {
/*
* Map TPU-DSP mailbox cmd/rsp queue buffers.
*
- * The client must hold a VIRTUAL_DEVICE wakelock.
+ * The client must have allocated a virtual device.
*/
#define GXP_MAP_TPU_MBX_QUEUE \
_IOW(GXP_IOCTL_BASE, 13, struct gxp_tpu_mbx_queue_ioctl)
@@ -817,7 +804,7 @@ struct gxp_tpu_mbx_queue_ioctl {
* from the kernel's internal records. It is recommended to use the argument
* that was passed in GXP_MAP_TPU_MBX_QUEUE to un-map the buffers.
*
- * The client must hold a VIRTUAL_DEVICE wakelock.
+ * The client must have allocated a virtual device.
*/
#define GXP_UNMAP_TPU_MBX_QUEUE \
_IOW(GXP_IOCTL_BASE, 14, struct gxp_tpu_mbx_queue_ioctl)
diff --git a/include/soc/google/exynos_pm_qos.h b/include/soc/google/exynos_pm_qos.h
deleted file mode 100644
index 441b06d..0000000
--- a/include/soc/google/exynos_pm_qos.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_EXYNOS_PM_QOS_H
-#define _LINUX_EXYNOS_PM_QOS_H
-/* interface for the exynos_pm_qos_power infrastructure of the linux kernel.
- *
- * Mark Gross <mgross@linux.intel.com>
- */
-
-#include <linux/plist.h>
-#include <linux/workqueue.h>
-
-enum {
- EXYNOS_PM_QOS_RESERVED = 0,
- PM_QOS_CLUSTER0_FREQ_MIN,
- PM_QOS_CLUSTER0_FREQ_MAX,
- PM_QOS_CLUSTER1_FREQ_MIN,
- PM_QOS_CLUSTER1_FREQ_MAX,
- PM_QOS_CLUSTER2_FREQ_MIN,
- PM_QOS_CLUSTER2_FREQ_MAX,
- PM_QOS_DEVICE_THROUGHPUT,
- PM_QOS_INTCAM_THROUGHPUT,
- PM_QOS_DEVICE_THROUGHPUT_MAX,
- PM_QOS_INTCAM_THROUGHPUT_MAX,
- PM_QOS_BUS_THROUGHPUT,
- PM_QOS_BUS_THROUGHPUT_MAX,
- PM_QOS_DISPLAY_THROUGHPUT,
- PM_QOS_DISPLAY_THROUGHPUT_MAX,
- PM_QOS_CAM_THROUGHPUT,
- PM_QOS_CAM_THROUGHPUT_MAX,
- PM_QOS_MFC_THROUGHPUT,
- PM_QOS_MFC_THROUGHPUT_MAX,
- PM_QOS_TNR_THROUGHPUT,
- PM_QOS_TNR_THROUGHPUT_MAX,
- PM_QOS_BO_THROUGHPUT,
- PM_QOS_BO_THROUGHPUT_MAX,
- PM_QOS_GPU_THROUGHPUT_MIN,
- PM_QOS_GPU_THROUGHPUT_MAX,
- EXYNOS_PM_QOS_NUM_CLASSES,
-};
-
-struct exynos_pm_qos_request {
- struct plist_node node;
- int exynos_pm_qos_class;
- struct delayed_work work; /* for exynos_pm_qos_update_request_timeout */
- const char *func;
- unsigned int line;
-};
-
-static inline void exynos_pm_qos_add_request(struct exynos_pm_qos_request *req,
- int exynos_pm_qos_class, s32 value)
-{
-}
-
-static inline void
-exynos_pm_qos_update_request(struct exynos_pm_qos_request *req, s32 new_value)
-{
-}
-
-static inline void
-exynos_pm_qos_remove_request(struct exynos_pm_qos_request *req)
-{
-}
-
-#endif /* _LINUX_EXYNOS_PM_QOS_H */