author    Neela Chithirala <chithiralan@google.com>  2022-02-14 03:48:34 +0000
committer Neela Chithirala <chithiralan@google.com>  2022-02-14 03:48:34 +0000
commit    49087428c8fea8eb57a4f57f4fe83e0d2f26b113 (patch)
tree      664288dd4dda3f59afb7de031b65ddf77c271bbe
parent    3ccb2479717de3089dbbcb894ddd045b2ddc256c (diff)
download  gs201-49087428c8fea8eb57a4f57f4fe83e0d2f26b113.tar.gz

Merge branch 'gs201-release' to android13-gs-pixel-5.10

* gs201-release:
  gxp: Implement VIRTUAL_DEVICE wakelock IOCTL
    Bug: 201600514
  gxp: Implement BLOCK wakelock IOCTL
    Bug: 201600514

Signed-off-by: Neela Chithirala <chithiralan@google.com>
Change-Id: I34ede1478d6804970b4672c445c7b7d2073f46ef
-rw-r--r--  Makefile            1
-rw-r--r--  gxp-client.c       61
-rw-r--r--  gxp-client.h       50
-rw-r--r--  gxp-csrs.h          6
-rw-r--r--  gxp-debug-dump.c  308
-rw-r--r--  gxp-debug-dump.h   21
-rw-r--r--  gxp-debugfs.c     158
-rw-r--r--  gxp-internal.h     14
-rw-r--r--  gxp-platform.c    338
-rw-r--r--  gxp-vd.c          207
-rw-r--r--  gxp-vd.h           62
-rw-r--r--  gxp.h             142

12 files changed, 1025 insertions, 343 deletions
diff --git a/Makefile b/Makefile
index 97d393e..8076f25 100644
--- a/Makefile
+++ b/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_GXP) += gxp.o
gxp-objs += \
gxp-bpm.o \
+ gxp-client.o \
gxp-debug-dump.o \
gxp-debugfs.o \
gxp-doorbell.o \
diff --git a/gxp-client.c b/gxp-client.c
new file mode 100644
index 0000000..2ff0a7c
--- /dev/null
+++ b/gxp-client.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GXP client structure.
+ *
+ * Copyright (C) 2021 Google LLC
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "gxp-client.h"
+#include "gxp-dma.h"
+#include "gxp-internal.h"
+#include "gxp-pm.h"
+#include "gxp-vd.h"
+#include "gxp-wakelock.h"
+
+struct gxp_client *gxp_client_create(struct gxp_dev *gxp)
+{
+ struct gxp_client *client;
+
+ client = kmalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ client->gxp = gxp;
+ init_rwsem(&client->semaphore);
+ client->has_block_wakelock = false;
+ client->has_vd_wakelock = false;
+ client->requested_power_state = AUR_OFF;
+ client->requested_memory_power_state = 0;
+ client->vd = NULL;
+ client->tpu_mbx_allocated = false;
+ return client;
+}
+
+void gxp_client_destroy(struct gxp_client *client)
+{
+ struct gxp_dev *gxp = client->gxp;
+
+ down_write(&gxp->vd_semaphore);
+
+#if IS_ENABLED(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_GXP_GEM5)
+ /*
+ * Unmap TPU buffers. If the mapping has already been removed, this
+ * is a no-op.
+ */
+ gxp_dma_unmap_tpu_buffer(gxp, client->mbx_desc);
+#endif // CONFIG_ANDROID && !CONFIG_GXP_GEM5
+
+ if (client->has_vd_wakelock)
+ gxp_vd_stop(client->vd);
+
+ up_write(&gxp->vd_semaphore);
+
+ if (client->has_block_wakelock)
+ gxp_wakelock_release(client->gxp);
+
+ gxp_vd_release(client->vd);
+
+ kfree(client);
+}
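
Usage note: gxp_client_create() returns an ERR_PTR() on allocation failure, so callers are expected to test the result with IS_ERR() and pair every successful create with gxp_client_destroy(). A minimal caller sketch (the example_* helpers are hypothetical; the real pattern is gxp_open()/gxp_release() in gxp-platform.c below):

	/* Sketch only: mirrors the gxp_open()/gxp_release() pairing below. */
	static int example_open(struct gxp_dev *gxp, struct file *file)
	{
		struct gxp_client *client = gxp_client_create(gxp);

		if (IS_ERR(client))
			return PTR_ERR(client);
		file->private_data = client;
		return 0;
	}

	static int example_release(struct file *file)
	{
		struct gxp_client *client = file->private_data;

		if (client)
			gxp_client_destroy(client); /* stops VD, drops wakelocks */
		return 0;
	}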
diff --git a/gxp-client.h b/gxp-client.h
new file mode 100644
index 0000000..97ba489
--- /dev/null
+++ b/gxp-client.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GXP client structure.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+#ifndef __GXP_CLIENT_H__
+#define __GXP_CLIENT_H__
+
+#include <linux/rwsem.h>
+#include <linux/types.h>
+
+#include "gxp-internal.h"
+#include "gxp-vd.h"
+
+/* Holds state belonging to a client */
+struct gxp_client {
+ struct gxp_dev *gxp;
+
+ /*
+ * Protects all state of this client instance.
+ * Any operation that requires the client to hold a particular
+ * wakelock must lock this semaphore for reading for the duration of
+ * that operation.
+ */
+ struct rw_semaphore semaphore;
+
+ bool has_block_wakelock;
+ bool has_vd_wakelock;
+ /* Value is one of the GXP_POWER_STATE_* values from gxp.h. */
+ uint requested_power_state;
+ /* Value is one of the MEMORY_POWER_STATE_* values from gxp.h. */
+ uint requested_memory_power_state;
+
+ struct gxp_virtual_device *vd;
+ bool tpu_mbx_allocated;
+ struct gxp_tpu_mbx_desc mbx_desc;
+};
+
+/*
+ * Allocates and initializes a client container.
+ */
+struct gxp_client *gxp_client_create(struct gxp_dev *gxp);
+
+/*
+ * Frees up the client container cleaning up any wakelocks, virtual devices, or
+ * TPU mailboxes it holds.
+ */
+void gxp_client_destroy(struct gxp_client *client);
+
+#endif /* __GXP_CLIENT_H__ */
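
Locking note: the comment on `semaphore` above implies a fixed shape for every handler that requires a wakelock. A hedged sketch of that shape (example_op() is hypothetical; gxp_map_buffer() and the other IOCTL handlers in gxp-platform.c below follow it):

	static int example_op(struct gxp_client *client)
	{
		int ret = 0;

		down_read(&client->semaphore);
		if (!client->has_vd_wakelock) {
			ret = -ENODEV;	/* wakelock required but not held */
			goto out;
		}
		/* ... work that requires a running virtual device ... */
	out:
		up_read(&client->semaphore);
		return ret;
	}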
diff --git a/gxp-csrs.h b/gxp-csrs.h
index d077b9e..95100d1 100644
--- a/gxp-csrs.h
+++ b/gxp-csrs.h
@@ -10,6 +10,8 @@
#define GXP_REG_DOORBELLS_SET_WRITEMASK 0x1
#define GXP_REG_DOORBELLS_CLEAR_WRITEMASK 0x1
+#define GXP_CMU_OFFSET 0x200000
+
enum gxp_csrs {
GXP_REG_LPM_VERSION = 0x40000,
GXP_REG_LPM_PSM_0 = 0x41000,
@@ -68,6 +70,10 @@ enum gxp_core_csrs {
#define TIMER_VALUE_OFFSET 0x8
#define TIMER_COUNT 8
+/* CMU offset */
+#define PLL_CON0_PLL_AUR 0x100
+#define PLL_CON0_NOC_USER 0x610
+
/* LPM Registers */
#define LPM_VERSION_OFFSET 0x0
#define TRIGGER_CSR_START_OFFSET 0x4
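
Usage note: the CMU constants above are consumed by the debugfs mux entries added in gxp-debugfs.c below. The CMU block sits GXP_CMU_OFFSET below the GXP register block, so accesses take roughly this shape (a sketch with error handling elided; reading the `<< 4` shift in gxp-debugfs.c as the mux-select bit is an assumption):

	void __iomem *cmu;
	u32 val;

	cmu = ioremap(gxp->regs.paddr - GXP_CMU_OFFSET, 0x1000);
	val = readl(cmu + PLL_CON0_PLL_AUR);		/* read AUR PLL mux state */
	writel(1 << 4, cmu + PLL_CON0_NOC_USER);	/* select NOC mux input 1 */
	iounmap(cmu);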
diff --git a/gxp-debug-dump.c b/gxp-debug-dump.c
index 5c7ae61..87d5539 100644
--- a/gxp-debug-dump.c
+++ b/gxp-debug-dump.c
@@ -25,6 +25,7 @@
#define GXP_COREDUMP_PENDING 0xF
#define KERNEL_INIT_DUMP_TIMEOUT (10000 * GXP_TIME_DELAY_FACTOR)
+#define SSCD_MSG_LENGTH 64
/* Enum indicating the debug dump request reason. */
enum gxp_debug_dump_init_type {
@@ -223,10 +224,13 @@ gxp_get_lpm_registers(struct gxp_dev *gxp, struct gxp_seg_header *seg_header,
dev_dbg(gxp->dev, "Done getting LPM registers\n");
}
-static void gxp_get_common_dump(struct gxp_dev *gxp,
- struct gxp_seg_header *common_seg_header,
- struct gxp_common_dump_data *common_dump_data)
+static void gxp_get_common_dump(struct gxp_dev *gxp)
{
+ struct gxp_common_dump *common_dump = gxp->debug_dump_mgr->common_dump;
+ struct gxp_seg_header *common_seg_header = common_dump->seg_header;
+ struct gxp_common_dump_data *common_dump_data =
+ &common_dump->common_dump_data;
+
gxp_get_common_registers(gxp,
&common_seg_header[GXP_COMMON_REGISTERS_IDX],
&common_dump_data->common_regs);
@@ -241,168 +245,199 @@ static void gxp_get_common_dump(struct gxp_dev *gxp,
common_dump_data->common_regs.aurora_revision);
}
-static void gxp_handle_debug_dump(struct gxp_dev *gxp,
- struct gxp_core_dump *core_dump,
- struct gxp_common_dump *common_dump,
- enum gxp_debug_dump_init_type init_type,
- uint core_bits)
-{
- struct gxp_core_dump_header *core_dump_header;
- struct gxp_core_header *core_header;
- int i;
#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+static void gxp_send_to_sscd(struct gxp_dev *gxp, void *segs, int seg_cnt,
+ const char *info)
+{
+ struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
struct sscd_platform_data *pdata =
- (struct sscd_platform_data *)gxp->debug_dump_mgr->sscd_pdata;
- struct sscd_segment *segs;
- int segs_num = GXP_NUM_COMMON_SEGMENTS;
- int seg_idx = 0;
- int core_dump_num = 0;
- int j;
- void *data_addr;
+ (struct sscd_platform_data *)mgr->sscd_pdata;
- for (i = 0; i < GXP_NUM_CORES; i++) {
- if (core_bits & BIT(i))
- core_dump_num++;
+ if (!pdata->sscd_report) {
+ dev_err(gxp->dev, "Failed to generate coredump\n");
+ return;
+ }
+
+ if (pdata->sscd_report(gxp->debug_dump_mgr->sscd_dev, segs, seg_cnt,
+ SSCD_FLAGS_ELFARM64HDR, info)) {
+ dev_err(gxp->dev, "Unable to send the report to SSCD daemon\n");
+ return;
}
/*
- * segs_num include the common segments, core segments for each core,
- * core header for each core
+ * This delay is needed to ensure sufficient time elapses between
+ * successive calls to sscd_report(), since the file names of the
+ * core dumps generated by the SSCD daemon only have seconds
+ * precision.
*/
- if (init_type == DEBUG_DUMP_FW_INIT)
- segs_num += GXP_NUM_CORE_SEGMENTS + 1;
- else
- segs_num += GXP_NUM_CORE_SEGMENTS * core_dump_num +
- core_dump_num;
+ msleep(1000);
+}
+#endif
- segs = kmalloc_array(segs_num, sizeof(struct sscd_segment),
- GFP_KERNEL);
- if (!segs)
- goto out;
+static void gxp_handle_debug_dump(struct gxp_dev *gxp, uint32_t core_id)
+{
+ struct gxp_core_dump_header *core_dump_header;
+ struct gxp_core_header *core_header;
+ struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
+ struct gxp_core_dump *core_dump = mgr->core_dump;
+ struct gxp_common_dump *common_dump = mgr->common_dump;
+ int i;
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+ int seg_idx = 0;
+ void *data_addr;
+ char sscd_msg[SSCD_MSG_LENGTH];
/* Common */
data_addr = &common_dump->common_dump_data.common_regs;
for (i = 0; i < GXP_NUM_COMMON_SEGMENTS; i++) {
- segs[seg_idx].addr = data_addr;
- segs[seg_idx].size = common_dump->seg_header[i].size;
- data_addr += segs[seg_idx].size;
+ mgr->segs[core_id][seg_idx].addr = data_addr;
+ mgr->segs[core_id][seg_idx].size =
+ common_dump->seg_header[i].size;
+ data_addr += mgr->segs[core_id][seg_idx].size;
seg_idx++;
}
-#endif // CONFIG_SUBSYSTEM_COREDUMP
+#endif
/* Core */
- for (i = 0; i < GXP_NUM_CORES; i++) {
- if ((core_bits & BIT(i)) == 0)
- continue;
-
- core_dump_header = &core_dump->core_dump_header[i];
- core_header = &core_dump_header->core_header;
- if (!core_header->dump_available) {
- dev_err(gxp->dev,
- "Core dump should have been available\n");
- goto out;
- }
-
+ core_dump_header = &core_dump->core_dump_header[core_id];
+ core_header = &core_dump_header->core_header;
+ if (!core_header->dump_available) {
+ dev_err(gxp->dev,
+ "Core dump should have been available\n");
+ return;
+ }
#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
- /* Core Header */
- segs[seg_idx].addr = core_header;
- segs[seg_idx].size = sizeof(struct gxp_core_header);
+ /* Core Header */
+ mgr->segs[core_id][seg_idx].addr = core_header;
+ mgr->segs[core_id][seg_idx].size = sizeof(struct gxp_core_header);
+ seg_idx++;
+
+ data_addr = &core_dump->dump_data[core_id *
+ core_header->core_dump_size /
+ sizeof(u32)];
+
+ for (i = 0; i < GXP_NUM_CORE_SEGMENTS - 1; i++) {
+ mgr->segs[core_id][seg_idx].addr = data_addr;
+ mgr->segs[core_id][seg_idx].size =
+ core_dump_header->seg_header[i].size;
+ data_addr += mgr->segs[core_id][seg_idx].size;
seg_idx++;
+ }
- data_addr = &core_dump->dump_data[i *
- core_header->core_dump_size /
- sizeof(u32)];
+ dev_dbg(gxp->dev, "Passing dump data to SSCD daemon\n");
+ snprintf(sscd_msg, SSCD_MSG_LENGTH - 1,
+ "gxp debug dump - dump data (core %0x)", core_id);
+ gxp_send_to_sscd(gxp, mgr->segs[core_id], seg_idx, sscd_msg);
+#endif
+ /* This bit signals that core dump has been processed */
+ core_header->dump_available = 0;
- for (j = 0; j < GXP_NUM_CORE_SEGMENTS; j++) {
- segs[seg_idx].addr = data_addr;
- segs[seg_idx].size =
- core_dump_header->seg_header[j].size;
- data_addr += segs[seg_idx].size;
- seg_idx++;
- }
+ for (i = 0; i < GXP_NUM_COMMON_SEGMENTS; i++)
+ common_dump->seg_header[i].valid = 0;
- dev_notice(gxp->dev, "Passing dump data to SSCD daemon\n");
- if (!pdata->sscd_report) {
- dev_err(gxp->dev,
- "Failed to generate coredump\n");
- goto out;
- }
+ for (i = 0; i < GXP_NUM_CORE_SEGMENTS; i++)
+ core_dump_header->seg_header[i].valid = 0;
- mutex_lock(&gxp->debug_dump_mgr->sscd_lock);
- if (pdata->sscd_report(gxp->debug_dump_mgr->sscd_dev, segs,
- segs_num, SSCD_FLAGS_ELFARM64HDR,
- "gxp debug dump")) {
- dev_err(gxp->dev,
- "Unable to send the report to SSCD daemon\n");
- mutex_unlock(&gxp->debug_dump_mgr->sscd_lock);
- goto out;
- }
+}
- /*
- * This delay is needed to ensure there's sufficient time
- * in between sscd_report() being called, as the file name of
- * the core dump files generated by the SSCD daemon includes a
- * time format with a seconds precision.
- */
- msleep(1000);
- mutex_unlock(&gxp->debug_dump_mgr->sscd_lock);
-#endif // CONFIG_SUBSYSTEM_COREDUMP
-
- /* This bit signals that core dump has been processed */
- core_header->dump_available = 0;
-
- if (init_type == DEBUG_DUMP_FW_INIT)
- goto out;
+static void gxp_free_segments(struct gxp_dev *gxp)
+{
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+ int core_id;
+
+ for (core_id = 0; core_id < GXP_NUM_CORES; core_id++)
+ kfree(gxp->debug_dump_mgr->segs[core_id]);
+#endif
+ kfree(gxp->debug_dump_mgr->common_dump);
+}
+
+static int gxp_init_segments(struct gxp_dev *gxp)
+{
+#if !IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+ return 0;
+#else
+ struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
+ int segs_num = GXP_NUM_COMMON_SEGMENTS;
+ int core_id = 0;
+
+ /*
+ * segs_num includes the common segments, plus one core's segments
+ * and its core header.
+ */
+ segs_num += GXP_NUM_CORE_SEGMENTS + 1;
+ for (core_id = 0; core_id < GXP_NUM_CORES; core_id++) {
+ mgr->segs[core_id] = kmalloc_array(segs_num,
+ sizeof(struct sscd_segment),
+ GFP_KERNEL);
+ if (!mgr->segs[core_id])
+ goto err_out;
}
-out:
+ mgr->common_dump = kmalloc(sizeof(*mgr->common_dump), GFP_KERNEL);
+ if (!mgr->common_dump)
+ goto err_out;
+
+ return 0;
+err_out:
+ gxp_free_segments(gxp);
+
+ return -ENOMEM;
+#endif
+}
+
+static void gxp_handle_dram_dump(struct gxp_dev *gxp, uint32_t core_id)
+{
+ struct gxp_debug_dump_manager *mgr = gxp->debug_dump_mgr;
+ struct gxp_core_dump_header *core_dump_header =
+ &mgr->core_dump->core_dump_header[core_id];
+ struct gxp_seg_header *dram_seg_header =
+ &core_dump_header->seg_header[GXP_CORE_DRAM_SEGMENT_IDX];
#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
- kfree(segs);
+ struct sscd_segment *sscd_seg =
+ &mgr->segs[core_id][GXP_DEBUG_DUMP_DRAM_SEGMENT_IDX];
+ char sscd_msg[SSCD_MSG_LENGTH];
+
+ sscd_seg->addr = gxp->fwbufs[core_id].vaddr;
+ sscd_seg->size = gxp->fwbufs[core_id].size;
+
+ dev_dbg(gxp->dev, "Passing dram data to SSCD daemon\n");
+ snprintf(sscd_msg, SSCD_MSG_LENGTH - 1,
+ "gxp debug dump - dram data (core %0x)", core_id);
+ gxp_send_to_sscd(gxp, sscd_seg, 1, sscd_msg);
#endif
- return;
+ dram_seg_header->valid = 1;
}
-static int gxp_generate_coredump(struct gxp_dev *gxp,
- enum gxp_debug_dump_init_type init_type,
- uint core_bits)
-{
+static bool gxp_is_segment_valid(struct gxp_dev *gxp, uint32_t core_id,
+ int seg_idx)
+{
struct gxp_core_dump *core_dump;
- struct gxp_common_dump *common_dump;
- struct gxp_seg_header *common_seg_header;
- struct gxp_common_dump_data *common_dump_data;
+ struct gxp_core_dump_header *core_dump_header;
+ struct gxp_seg_header *seg_header;
+
+ core_dump = gxp->debug_dump_mgr->core_dump;
+ core_dump_header = &core_dump->core_dump_header[core_id];
+ seg_header = &core_dump_header->seg_header[seg_idx];
+
+ return seg_header->valid;
+}
+
+static int gxp_generate_coredump(struct gxp_dev *gxp, uint32_t core_id)
+{
if (!gxp->debug_dump_mgr->core_dump) {
dev_err(gxp->dev, "Core dump not allocated\n");
return -EINVAL;
}
- if (core_bits == 0) {
- dev_err(gxp->dev, "The number of core dumps requested is 0.\n");
- return -EINVAL;
- } else if (core_bits > GXP_COREDUMP_PENDING) {
- dev_err(gxp->dev,
- "The number of core dumps requested (%0x) is greater than expected (%0x)\n",
- core_bits, GXP_COREDUMP_PENDING);
- return -EINVAL;
- }
-
gxp_debug_dump_cache_invalidate(gxp);
- core_dump = gxp->debug_dump_mgr->core_dump;
- common_dump = kmalloc(sizeof(*common_dump), GFP_KERNEL);
- if (!common_dump)
- return -ENOMEM;
-
- common_seg_header = common_dump->seg_header;
- common_dump_data = &common_dump->common_dump_data;
-
- gxp_get_common_dump(gxp, common_seg_header, common_dump_data);
+ mutex_lock(&gxp->debug_dump_mgr->debug_dump_lock);
- gxp_handle_debug_dump(gxp, core_dump, common_dump, init_type,
- core_bits);
+ if (!gxp_is_segment_valid(gxp, core_id, GXP_CORE_DRAM_SEGMENT_IDX)) {
+ gxp_handle_dram_dump(gxp, core_id);
+ } else {
+ gxp_get_common_dump(gxp);
+ gxp_handle_debug_dump(gxp, core_id);
+ }
- /* Mark the common segments as read */
- common_seg_header->valid = 0;
+ mutex_unlock(&gxp->debug_dump_mgr->debug_dump_lock);
gxp_debug_dump_cache_flush(gxp);
@@ -414,6 +449,8 @@ static void gxp_wait_kernel_init_dump_work(struct work_struct *work)
struct gxp_debug_dump_manager *mgr =
container_of(work, struct gxp_debug_dump_manager,
wait_kernel_init_dump_work);
+ u32 core_bits;
+ int i;
wait_event_timeout(mgr->kernel_init_dump_waitq,
mgr->kernel_init_dump_pending ==
@@ -421,8 +458,12 @@ static void gxp_wait_kernel_init_dump_work(struct work_struct *work)
msecs_to_jiffies(KERNEL_INIT_DUMP_TIMEOUT));
mutex_lock(&mgr->lock);
- gxp_generate_coredump(mgr->gxp, DEBUG_DUMP_KERNEL_INIT,
- mgr->kernel_init_dump_pending);
+ core_bits = mgr->kernel_init_dump_pending;
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ if (!(core_bits & BIT(i)))
+ continue;
+ gxp_generate_coredump(mgr->gxp, i);
+ }
mgr->kernel_init_dump_pending = 0;
mutex_unlock(&mgr->lock);
}
@@ -460,7 +501,7 @@ void gxp_debug_dump_process_dump(struct work_struct *work)
switch (core_header->dump_req_reason) {
case DEBUG_DUMP_FW_INIT:
- gxp_generate_coredump(gxp, DEBUG_DUMP_FW_INIT, BIT(core_id));
+ gxp_generate_coredump(gxp, core_id);
break;
case DEBUG_DUMP_KERNEL_INIT:
mutex_lock(&mgr->lock);
@@ -489,7 +530,7 @@ int gxp_debug_dump_init(struct gxp_dev *gxp, void *sscd_dev, void *sscd_pdata)
struct resource r;
struct gxp_debug_dump_manager *mgr;
struct gxp_core_dump_header *core_dump_header;
- int core;
+ int core, i;
mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
if (!mgr)
@@ -524,6 +565,8 @@ int gxp_debug_dump_init(struct gxp_dev *gxp, void *sscd_dev, void *sscd_pdata)
for (core = 0; core < GXP_NUM_CORES; core++) {
core_dump_header = &mgr->core_dump->core_dump_header[core];
core_dump_header->core_header.dump_available = 0;
+ for (i = 0; i < GXP_NUM_CORE_SEGMENTS; i++)
+ core_dump_header->seg_header[i].valid = 0;
mgr->debug_dump_works[core].gxp = gxp;
mgr->debug_dump_works[core].core_id = core;
@@ -531,13 +574,15 @@ int gxp_debug_dump_init(struct gxp_dev *gxp, void *sscd_dev, void *sscd_pdata)
gxp_debug_dump_process_dump);
}
+ gxp_init_segments(gxp);
+
/* No need for a DMA handle since the carveout is coherent */
mgr->debug_dump_dma_handle = 0;
mgr->kernel_init_dump_pending = 0;
mgr->sscd_dev = sscd_dev;
mgr->sscd_pdata = sscd_pdata;
mutex_init(&mgr->lock);
- mutex_init(&mgr->sscd_lock);
+ mutex_init(&mgr->debug_dump_lock);
INIT_WORK(&mgr->wait_kernel_init_dump_work,
gxp_wait_kernel_init_dump_work);
@@ -557,11 +602,12 @@ void gxp_debug_dump_exit(struct gxp_dev *gxp)
}
cancel_work_sync(&mgr->wait_kernel_init_dump_work);
+ gxp_free_segments(gxp);
/* TODO (b/200169232) Remove this once we're using devm_memremap */
memunmap(gxp->coredumpbuf.vaddr);
mutex_destroy(&mgr->lock);
- mutex_destroy(&mgr->sscd_lock);
+ mutex_destroy(&mgr->debug_dump_lock);
devm_kfree(mgr->gxp->dev, mgr);
gxp->debug_dump_mgr = NULL;
}
diff --git a/gxp-debug-dump.h b/gxp-debug-dump.h
index 013f27c..453c045 100644
--- a/gxp-debug-dump.h
+++ b/gxp-debug-dump.h
@@ -11,10 +11,18 @@
#include <linux/types.h>
#include <linux/workqueue.h>
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+#include <linux/platform_data/sscoredump.h>
+#endif
+
#include "gxp-internal.h"
#define GXP_NUM_COMMON_SEGMENTS 2
-#define GXP_NUM_CORE_SEGMENTS 7
+#define GXP_NUM_CORE_SEGMENTS 8
+#define GXP_CORE_DRAM_SEGMENT_IDX 7
+#define GXP_DEBUG_DUMP_CORE_SEGMENT_IDX_START (GXP_NUM_COMMON_SEGMENTS + 1)
+#define GXP_DEBUG_DUMP_DRAM_SEGMENT_IDX (GXP_DEBUG_DUMP_CORE_SEGMENT_IDX_START \
+ + GXP_CORE_DRAM_SEGMENT_IDX)
#define GXP_SEG_HEADER_NAME_LENGTH 32
#define GXP_Q7_ICACHE_SIZE 131072 /* I-cache size in bytes */
@@ -151,6 +159,7 @@ struct gxp_debug_dump_manager {
struct gxp_dev *gxp;
struct gxp_debug_dump_work debug_dump_works[GXP_NUM_CORES];
struct gxp_core_dump *core_dump; /* start of the core dump */
+ struct gxp_common_dump *common_dump;
void *sscd_dev;
void *sscd_pdata;
dma_addr_t debug_dump_dma_handle; /* dma handle for debug dump */
@@ -160,8 +169,14 @@ struct gxp_debug_dump_manager {
int kernel_init_dump_pending;
wait_queue_head_t kernel_init_dump_waitq;
struct work_struct wait_kernel_init_dump_work;
- /* SSCD lock to ensure SSCD is only processing one report at a time */
- struct mutex sscd_lock;
+ /*
+ * Debug dump lock to ensure only one debug dump is being processed at a
+ * time
+ */
+ struct mutex debug_dump_lock;
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP)
+ struct sscd_segment *segs[GXP_NUM_CORES];
+#endif
};
int gxp_debug_dump_init(struct gxp_dev *gxp, void *sscd_dev, void *sscd_pdata);
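
Layout note: with GXP_NUM_COMMON_SEGMENTS = 2 and GXP_NUM_CORE_SEGMENTS = 8, the index macros above imply the following layout for each per-core segs[] array (a sketch; see gxp_init_segments() and gxp_handle_debug_dump() above):

	/*
	 * segs[0..1]  - common segments
	 * segs[2]     - core header
	 * segs[3..9]  - core segments 0..6
	 *               (GXP_DEBUG_DUMP_CORE_SEGMENT_IDX_START == 3)
	 * segs[10]    - DRAM segment
	 *               (GXP_DEBUG_DUMP_DRAM_SEGMENT_IDX == 3 + 7 == 10)
	 *
	 * Total: GXP_NUM_COMMON_SEGMENTS + GXP_NUM_CORE_SEGMENTS + 1 == 11
	 * entries, matching the kmalloc_array() size in gxp_init_segments().
	 */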
diff --git a/gxp-debugfs.c b/gxp-debugfs.c
index c9ba69c..0576c9a 100644
--- a/gxp-debugfs.c
+++ b/gxp-debugfs.c
@@ -5,7 +5,12 @@
* Copyright (C) 2021 Google LLC
*/
+#ifdef CONFIG_GXP_CLOUDRIPPER
+#include <linux/acpm_dvfs.h>
+#endif
+
#include "gxp.h"
+#include "gxp-client.h"
#include "gxp-debug-dump.h"
#include "gxp-debugfs.h"
#include "gxp-firmware.h"
@@ -14,6 +19,7 @@
#include "gxp-pm.h"
#include "gxp-mailbox.h"
#include "gxp-telemetry.h"
+#include "gxp-lpm.h"
#include "gxp-vd.h"
#include "gxp-wakelock.h"
@@ -112,10 +118,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(gxp_pingpong_fops, NULL, gxp_debugfs_pingpong,
static int gxp_firmware_run_set(void *data, u64 val)
{
struct gxp_dev *gxp = (struct gxp_dev *) data;
- struct gxp_client *client_to_delete;
+ struct gxp_client *client;
int ret = 0;
- down_write(&gxp->vd_semaphore);
+ mutex_lock(&gxp->debugfs_client_lock);
if (val) {
if (gxp->debugfs_client) {
@@ -124,9 +130,6 @@ static int gxp_firmware_run_set(void *data, u64 val)
goto out;
}
- /* Cannot run firmware without a wakelock */
- gxp_wakelock_acquire(gxp);
-
/*
* Cleanup any bad state or corruption the device might've
* caused
@@ -134,45 +137,61 @@ static int gxp_firmware_run_set(void *data, u64 val)
gxp_fw_data_destroy(gxp);
gxp_fw_data_init(gxp);
- gxp->debugfs_client = gxp_client_create(gxp);
- if (IS_ERR(gxp->debugfs_client)) {
+ client = gxp_client_create(gxp);
+ if (IS_ERR(client)) {
dev_err(gxp->dev, "Failed to create client\n");
- ret = PTR_ERR(gxp->debugfs_client);
- gxp->debugfs_client = NULL;
- gxp_wakelock_release(gxp);
goto out;
}
+ gxp->debugfs_client = client;
- ret = gxp_vd_allocate(gxp->debugfs_client, GXP_NUM_CORES);
- if (ret) {
+ gxp->debugfs_client->vd = gxp_vd_allocate(gxp, GXP_NUM_CORES);
+ if (IS_ERR(gxp->debugfs_client->vd)) {
dev_err(gxp->dev, "Failed to allocate VD\n");
- gxp_client_destroy(gxp->debugfs_client);
- gxp->debugfs_client = NULL;
- gxp_wakelock_release(gxp);
- goto out;
+ ret = PTR_ERR(gxp->debugfs_client->vd);
+ goto err_start;
}
+
+ ret = gxp_wakelock_acquire(gxp);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to acquire BLOCK wakelock\n");
+ goto err_start;
+ }
+ gxp->debugfs_client->has_block_wakelock = true;
+
+ down_write(&gxp->vd_semaphore);
+ ret = gxp_vd_start(gxp->debugfs_client->vd);
+ up_write(&gxp->vd_semaphore);
+ if (ret) {
+ dev_err(gxp->dev, "Failed to start VD\n");
+ goto err_start;
+ }
+ gxp->debugfs_client->has_vd_wakelock = true;
} else {
if (!gxp->debugfs_client) {
dev_err(gxp->dev, "Firmware not running!\n");
ret = -EIO;
goto out;
}
- client_to_delete = gxp->debugfs_client;
- gxp->debugfs_client = NULL;
-
- up_write(&gxp->vd_semaphore);
-
- gxp_client_destroy(client_to_delete);
- gxp_wakelock_release(gxp);
- /* Return here, since vd_semaphore has already been unlocked */
- return ret;
+ /*
+ * Cleaning up the client will stop the VD it owns and release
+ * the BLOCK wakelock it is holding.
+ */
+ gxp_client_destroy(gxp->debugfs_client);
+ gxp->debugfs_client = NULL;
}
out:
- up_write(&gxp->vd_semaphore);
+ mutex_unlock(&gxp->debugfs_client_lock);
return ret;
+
+err_start:
+ /* Destroying a client cleans up any VDs or wakelocks it held. */
+ gxp_client_destroy(gxp->debugfs_client);
+ gxp->debugfs_client = NULL;
+ mutex_unlock(&gxp->debugfs_client_lock);
+ return ret;
}
static int gxp_firmware_run_get(void *data, u64 *val)
@@ -351,12 +370,86 @@ out:
DEFINE_DEBUGFS_ATTRIBUTE(gxp_log_eventfd_signal_fops, NULL,
gxp_log_eventfd_signal_set, "%llu\n");
+/* TODO: Remove these mux entries once the experiment is done */
+static int gxp_cmu_mux1_set(void *data, u64 val)
+{
+ struct gxp_dev *gxp = (struct gxp_dev *)data;
+ void *addr;
+
+ if (val > 1) {
+ dev_err(gxp->dev, "Incorrect val for cmu_mux1, only 0 and 1 allowed\n");
+ return -EINVAL;
+ }
+
+ addr = ioremap(gxp->regs.paddr - GXP_CMU_OFFSET, 0x1000);
+
+ if (!addr) {
+ dev_err(gxp->dev, "Cannot map CMU1 address\n");
+ return -EIO;
+ }
+
+ writel(val << 4, addr + PLL_CON0_PLL_AUR);
+ iounmap(addr);
+ return 0;
+}
+
+static int gxp_cmu_mux1_get(void *data, u64 *val)
+{
+ struct gxp_dev *gxp = (struct gxp_dev *)data;
+ void *addr;
+
+ addr = ioremap(gxp->regs.paddr - GXP_CMU_OFFSET, 0x1000);
+ if (!addr) {
+ dev_err(gxp->dev, "Cannot map CMU1 address\n");
+ return -EIO;
+ }
+
+ *val = readl(addr + PLL_CON0_PLL_AUR);
+ iounmap(addr);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(gxp_cmu_mux1_fops, gxp_cmu_mux1_get, gxp_cmu_mux1_set,
+ "%llu\n");
+
+static int gxp_cmu_mux2_set(void *data, u64 val)
+{
+ struct gxp_dev *gxp = (struct gxp_dev *)data;
+ void *addr;
+
+ if (val > 1) {
+ dev_err(gxp->dev, "Incorrect val for cmu_mux2, only 0 and 1 allowed\n");
+ return -EINVAL;
+ }
+
+ addr = ioremap(gxp->regs.paddr - GXP_CMU_OFFSET, 0x1000);
+
+ if (!addr) {
+ dev_err(gxp->dev, "Cannot map CMU2 address\n");
+ return -EIO;
+ }
+
+ writel(val << 4, addr + PLL_CON0_NOC_USER);
+ iounmap(addr);
+ return 0;
+}
+
+static int gxp_cmu_mux2_get(void *data, u64 *val)
+{
+ struct gxp_dev *gxp = (struct gxp_dev *)data;
+ void *addr;
+
+ addr = ioremap(gxp->regs.paddr - GXP_CMU_OFFSET, 0x1000);
+ if (!addr) {
+ dev_err(gxp->dev, "Cannot map CMU2 address\n");
+ return -EIO;
+ }
+
+ *val = readl(addr + PLL_CON0_NOC_USER);
+ iounmap(addr);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(gxp_cmu_mux2_fops, gxp_cmu_mux2_get, gxp_cmu_mux2_set,
+ "%llu\n");
+
void gxp_create_debugfs(struct gxp_dev *gxp)
{
gxp->d_entry = debugfs_create_dir("gxp", NULL);
if (IS_ERR_OR_NULL(gxp->d_entry))
return;
+ mutex_init(&gxp->debugfs_client_lock);
gxp->debugfs_wakelock_held = false;
debugfs_create_file("lpm_test", 0200, gxp->d_entry, gxp,
@@ -376,12 +469,21 @@ void gxp_create_debugfs(struct gxp_dev *gxp)
debugfs_create_file("log", 0600, gxp->d_entry, gxp, &gxp_log_buff_fops);
debugfs_create_file("log_eventfd", 0200, gxp->d_entry, gxp,
&gxp_log_eventfd_signal_fops);
+ debugfs_create_file("cmumux1", 0600, gxp->d_entry, gxp,
+ &gxp_cmu_mux1_fops);
+ debugfs_create_file("cmumux2", 0600, gxp->d_entry, gxp,
+ &gxp_cmu_mux2_fops);
}
void gxp_remove_debugfs(struct gxp_dev *gxp)
{
+ debugfs_remove_recursive(gxp->d_entry);
+
+ /*
+ * Now that debugfs is torn down, and no other calls to
+ * `gxp_firmware_run_set()` can occur, destroy any client that may have
+ * been left running.
+ */
if (gxp->debugfs_client)
gxp_client_destroy(gxp->debugfs_client);
-
- debugfs_remove_recursive(gxp->d_entry);
}
diff --git a/gxp-internal.h b/gxp-internal.h
index c5ee20d..6ee9ee6 100644
--- a/gxp-internal.h
+++ b/gxp-internal.h
@@ -16,6 +16,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include "gxp-config.h"
@@ -27,15 +28,6 @@ struct gxp_tpu_mbx_desc {
size_t cmdq_size, respq_size;
};
-/* Holds state belonging to a client */
-struct gxp_client {
- struct gxp_dev *gxp;
- void *app;
- bool vd_allocated;
- bool tpu_mbx_allocated;
- struct gxp_tpu_mbx_desc mbx_desc;
-};
-
/* ioremapped resource */
struct gxp_mapped_resource {
void __iomem *vaddr; /* starting virtual address */
@@ -56,6 +48,7 @@ struct gxp_tpu_dev {
};
/* Forward declarations from submodules */
+struct gxp_client;
struct gxp_mailbox_manager;
struct gxp_debug_dump_manager;
struct gxp_mapping_root;
@@ -102,8 +95,9 @@ struct gxp_dev {
* without running them on physical cores.
*/
struct rw_semaphore vd_semaphore;
- struct gxp_client *core_to_client[GXP_NUM_CORES];
+ struct gxp_virtual_device *core_to_vd[GXP_NUM_CORES];
struct gxp_client *debugfs_client;
+ struct mutex debugfs_client_lock;
bool debugfs_wakelock_held;
struct gxp_thermal_manager *thermal_mgr;
struct gxp_dma_manager *dma_mgr;
diff --git a/gxp-platform.c b/gxp-platform.c
index 96a3a76..35634c0 100644
--- a/gxp-platform.c
+++ b/gxp-platform.c
@@ -27,6 +27,7 @@
#endif
#include "gxp.h"
+#include "gxp-client.h"
#include "gxp-debug-dump.h"
#include "gxp-debugfs.h"
#include "gxp-dma.h"
@@ -66,7 +67,6 @@ static int gxp_open(struct inode *inode, struct file *file)
struct gxp_client *client;
struct gxp_dev *gxp = container_of(file->private_data, struct gxp_dev,
misc_dev);
- int ret = 0;
client = gxp_client_create(gxp);
if (IS_ERR(client))
@@ -74,19 +74,12 @@ static int gxp_open(struct inode *inode, struct file *file)
file->private_data = client;
- ret = gxp_wakelock_acquire(gxp);
- if (ret) {
- gxp_client_destroy(client);
- file->private_data = NULL;
- }
-
- return ret;
+ return 0;
}
static int gxp_release(struct inode *inode, struct file *file)
{
struct gxp_client *client = file->private_data;
- struct gxp_dev *gxp;
/*
* If open failed and no client was created then no clean-up is needed.
@@ -94,16 +87,12 @@ static int gxp_release(struct inode *inode, struct file *file)
if (!client)
return 0;
- gxp = client->gxp;
-
/*
* TODO (b/184572070): Unmap buffers and drop mailbox responses
* belonging to the client
*/
gxp_client_destroy(client);
- gxp_wakelock_release(gxp);
-
return 0;
}
@@ -133,11 +122,6 @@ static int gxp_map_buffer(struct gxp_client *client,
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
- phys_core_list = gxp_vd_virt_core_list_to_phys_core_list(
- client, ibuf.virtual_core_list);
- if (phys_core_list == 0)
- return -EINVAL;
-
if (ibuf.size == 0)
return -EINVAL;
@@ -147,6 +131,23 @@ static int gxp_map_buffer(struct gxp_client *client,
return -EINVAL;
}
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_MAP_BUFFER requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ phys_core_list = gxp_vd_virt_core_list_to_phys_core_list(
+ client->vd, ibuf.virtual_core_list);
+ if (phys_core_list == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
#ifndef CONFIG_GXP_HAS_SYSMMU
/*
* TODO(b/193272602) On systems without a SysMMU, all attempts to map
@@ -160,19 +161,23 @@ static int gxp_map_buffer(struct gxp_client *client,
map = gxp_mapping_get_host(gxp, ibuf.host_address);
if (map) {
ibuf.device_address = map->device_address;
- if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
- return -EFAULT;
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
+ ret = -EFAULT;
+ goto out;
+ }
map->map_count++;
- return ret;
+ goto out;
}
#endif
map = gxp_mapping_create(gxp, phys_core_list, ibuf.host_address,
ibuf.size, /*gxp_dma_flags=*/0,
mapping_flags_to_dma_dir(ibuf.flags));
- if (IS_ERR(map))
- return PTR_ERR(map);
+ if (IS_ERR(map)) {
+ ret = PTR_ERR(map);
+ goto out;
+ }
ret = gxp_mapping_put(gxp, map);
if (ret)
@@ -185,12 +190,16 @@ static int gxp_map_buffer(struct gxp_client *client,
goto error_remove;
}
+out:
+ up_read(&client->semaphore);
+
return ret;
error_remove:
gxp_mapping_remove(gxp, map);
error_destroy:
gxp_mapping_destroy(gxp, map);
+ up_read(&client->semaphore);
devm_kfree(gxp->dev, (void *)map);
return ret;
}
@@ -206,17 +215,32 @@ static int gxp_unmap_buffer(struct gxp_client *client,
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_UNMAP_BUFFER requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
map = gxp_mapping_get(gxp, ibuf.device_address);
- if (!map)
- return -EINVAL;
+ if (!map) {
+ ret = -EINVAL;
+ goto out;
+ }
WARN_ON(map->host_address != ibuf.host_address);
if (--(map->map_count))
- return ret;
+ goto out;
gxp_mapping_remove(gxp, map);
gxp_mapping_destroy(gxp, map);
+out:
+ up_read(&client->semaphore);
+
return ret;
}
@@ -226,16 +250,34 @@ static int gxp_sync_buffer(struct gxp_client *client,
struct gxp_dev *gxp = client->gxp;
struct gxp_sync_ioctl ibuf;
struct gxp_mapping *map;
+ int ret;
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_SYNC_BUFFER requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
map = gxp_mapping_get(gxp, ibuf.device_address);
- if (!map)
- return -EINVAL;
+ if (!map) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = gxp_mapping_sync(gxp, map, ibuf.offset, ibuf.size,
+ ibuf.flags == GXP_SYNC_FOR_CPU);
- return gxp_mapping_sync(gxp, map, ibuf.offset, ibuf.size,
- ibuf.flags == GXP_SYNC_FOR_CPU);
+out:
+ up_read(&client->semaphore);
+
+ return ret;
}
static int gxp_mailbox_command(struct gxp_client *client,
@@ -254,26 +296,39 @@ static int gxp_mailbox_command(struct gxp_client *client,
return -EFAULT;
}
- phys_core = gxp_vd_virt_core_to_phys_core(client, ibuf.virtual_core_id);
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_MAILBOX_COMMAND requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
if (phys_core < 0) {
dev_err(gxp->dev,
"Mailbox command failed: Invalid virtual core id (%u)\n",
ibuf.virtual_core_id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (!gxp_is_fw_running(gxp, phys_core)) {
dev_err(gxp->dev,
"Cannot process mailbox command for core %d when firmware isn't running\n",
phys_core);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (gxp->mailbox_mgr == NULL || gxp->mailbox_mgr->mailboxes == NULL ||
gxp->mailbox_mgr->mailboxes[phys_core] == NULL) {
dev_err(gxp->dev, "Mailbox not initialized for core %d\n",
phys_core);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
/* Pack the command structure */
@@ -294,16 +349,20 @@ static int gxp_mailbox_command(struct gxp_client *client,
if (ret) {
dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
ret);
- return ret;
+ goto out;
}
ibuf.sequence_number = cmd.seq;
if (copy_to_user(argp, &ibuf, sizeof(ibuf))) {
dev_err(gxp->dev, "Failed to copy back sequence number!\n");
- return -EFAULT;
+ ret = -EFAULT;
+ goto out;
}
- return 0;
+out:
+ up_read(&client->semaphore);
+
+ return ret;
}
static int gxp_mailbox_response(struct gxp_client *client,
@@ -315,22 +374,35 @@ static int gxp_mailbox_response(struct gxp_client *client,
struct gxp_async_response *resp_ptr;
int phys_core;
unsigned long flags;
+ int ret = 0;
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
+ /* Caller must hold VIRTUAL_DEVICE wakelock */
+ down_read(&client->semaphore);
+
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "GXP_MAILBOX_RESPONSE requires the client hold a VIRTUAL_DEVICE wakelock\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
virtual_core_id = ibuf.virtual_core_id;
- phys_core = gxp_vd_virt_core_to_phys_core(client, virtual_core_id);
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virtual_core_id);
if (phys_core < 0) {
dev_err(gxp->dev, "Mailbox response failed: Invalid virtual core id (%u)\n",
virtual_core_id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (!gxp_is_fw_running(gxp, phys_core)) {
dev_err(gxp->dev, "Cannot process mailbox response for core %d when firmware isn't running\n",
phys_core);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
spin_lock_irqsave(&gxp->mailbox_resps_lock, flags);
@@ -397,9 +469,12 @@ static int gxp_mailbox_response(struct gxp_client *client,
kfree(resp_ptr);
if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
- return -EFAULT;
+ ret = -EFAULT;
- return 0;
+out:
+ up_read(&client->semaphore);
+
+ return ret;
}
static int gxp_get_specs(struct gxp_client *client,
@@ -425,6 +500,7 @@ static int gxp_allocate_vd(struct gxp_client *client,
{
struct gxp_dev *gxp = client->gxp;
struct gxp_virtual_device_ioctl ibuf;
+ struct gxp_virtual_device *vd;
int ret = 0;
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
@@ -435,15 +511,26 @@ static int gxp_allocate_vd(struct gxp_client *client,
return -EINVAL;
}
- down_write(&gxp->vd_semaphore);
- if (client->vd_allocated) {
- up_write(&gxp->vd_semaphore);
+ down_write(&client->semaphore);
+
+ if (client->vd) {
dev_err(gxp->dev, "Virtual device was already allocated for client\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- ret = gxp_vd_allocate(client, ibuf.core_count);
- up_write(&gxp->vd_semaphore);
+ vd = gxp_vd_allocate(gxp, ibuf.core_count);
+ if (IS_ERR(vd)) {
+ dev_err(gxp->dev,
+ "Failed to allocate virtual device for client (%ld)\n",
+ PTR_ERR(vd));
+ ret = PTR_ERR(vd);
+ goto out;
+ }
+
+ client->vd = vd;
+
+out:
+ up_write(&client->semaphore);
return ret;
}
@@ -474,7 +561,7 @@ gxp_etm_trace_start_command(struct gxp_client *client,
if (ibuf.pc_match_mask_length > ETM_TRACE_PC_MATCH_MASK_LEN_MAX)
return -EINVAL;
- phys_core = gxp_vd_virt_core_to_phys_core(client, ibuf.virtual_core_id);
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
if (phys_core < 0) {
dev_err(gxp->dev, "Trace start failed: Invalid virtual core id (%u)\n",
ibuf.virtual_core_id);
@@ -501,7 +588,7 @@ static int gxp_etm_trace_sw_stop_command(struct gxp_client *client,
return -EFAULT;
- phys_core = gxp_vd_virt_core_to_phys_core(client, virtual_core_id);
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virtual_core_id);
if (phys_core < 0) {
dev_err(gxp->dev, "Trace stop via software trigger failed: Invalid virtual core id (%u)\n",
virtual_core_id);
@@ -527,7 +614,7 @@ static int gxp_etm_trace_cleanup_command(struct gxp_client *client,
if (copy_from_user(&virtual_core_id, argp, sizeof(virtual_core_id)))
return -EFAULT;
- phys_core = gxp_vd_virt_core_to_phys_core(client, virtual_core_id);
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, virtual_core_id);
if (phys_core < 0) {
dev_err(gxp->dev, "Trace cleanup failed: Invalid virtual core id (%u)\n",
virtual_core_id);
@@ -560,7 +647,7 @@ gxp_etm_get_trace_info_command(struct gxp_client *client,
if (ibuf.type > 1)
return -EINVAL;
- phys_core = gxp_vd_virt_core_to_phys_core(client, ibuf.virtual_core_id);
+ phys_core = gxp_vd_virt_core_to_phys_core(client->vd, ibuf.virtual_core_id);
if (phys_core < 0) {
dev_err(gxp->dev, "Get trace info failed: Invalid virtual core id (%u)\n",
ibuf.virtual_core_id);
@@ -653,7 +740,7 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
virtual_core_list = ibuf.virtual_core_list;
core_count = hweight_long(virtual_core_list);
phys_core_list = gxp_vd_virt_core_list_to_phys_core_list(
- client, virtual_core_list);
+ client->vd, virtual_core_list);
if (!phys_core_list) {
dev_err(gxp->dev, "%s: invalid virtual core list 0x%x\n",
__func__, virtual_core_list);
@@ -667,7 +754,7 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
if (!mbx_info)
return -ENOMEM;
- down_write(&gxp->vd_semaphore);
+ down_write(&client->semaphore);
if (client->tpu_mbx_allocated) {
dev_err(gxp->dev, "%s: Mappings already exist for TPU mailboxes\n",
@@ -708,7 +795,7 @@ static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
client->tpu_mbx_allocated = true;
error:
- up_write(&gxp->vd_semaphore);
+ up_write(&client->semaphore);
kfree(mbx_info);
return ret;
@@ -729,7 +816,7 @@ static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
- down_write(&gxp->vd_semaphore);
+ down_write(&client->semaphore);
if (!client->tpu_mbx_allocated) {
dev_err(gxp->dev, "%s: No mappings exist for TPU mailboxes\n",
@@ -753,7 +840,7 @@ static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
client->tpu_mbx_allocated = false;
out:
- up_write(&gxp->vd_semaphore);
+ up_write(&client->semaphore);
return ret;
#else
@@ -814,6 +901,137 @@ static int gxp_read_global_counter(struct gxp_client *client,
return 0;
}
+static int gxp_acquire_wake_lock(struct gxp_client *client,
+ struct gxp_acquire_wakelock_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_acquire_wakelock_ioctl ibuf;
+ bool acquired_block_wakelock = false;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ down_write(&client->semaphore);
+
+ /* Acquire a BLOCK wakelock if requested */
+ if (ibuf.components_to_wake & WAKELOCK_BLOCK) {
+ if (!client->has_block_wakelock) {
+ ret = gxp_wakelock_acquire(gxp);
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to acquire BLOCK wakelock for client (ret=%d)\n",
+ ret);
+ goto out;
+ }
+ acquired_block_wakelock = true;
+ }
+
+ client->has_block_wakelock = true;
+ }
+
+ /* Acquire a VIRTUAL_DEVICE wakelock if requested */
+ if (ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) {
+ if (!client->has_block_wakelock) {
+ dev_err(gxp->dev,
+ "Must hold BLOCK wakelock to acquire VIRTUAL_DEVICE wakelock\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!client->vd) {
+ dev_err(gxp->dev,
+ "Must allocate a virtual device to acquire a VIRTUAL_DEVICE wakelock\n");
+ ret = -ENODEV;
+ goto err_acquiring_vd_wl;
+ }
+
+ if (!client->has_vd_wakelock) {
+ down_write(&gxp->vd_semaphore);
+ ret = gxp_vd_start(client->vd);
+ up_write(&gxp->vd_semaphore);
+ }
+
+ if (ret) {
+ dev_err(gxp->dev,
+ "Failed to acquire VIRTUAL_DEVICE wakelock for client (ret=%d)\n",
+ ret);
+ goto err_acquiring_vd_wl;
+ }
+
+ client->has_vd_wakelock = true;
+ }
+
+out:
+ up_write(&client->semaphore);
+
+ return ret;
+
+err_acquiring_vd_wl:
+ /*
+ * In a single call, if any wakelock acquisition fails, all of them do.
+ * If the client was acquiring both wakelocks and failed to acquire the
+ * VIRTUAL_DEVICE wakelock after successfully acquiring the BLOCK
+ * wakelock, then release it before returning the error code.
+ */
+ if (acquired_block_wakelock) {
+ gxp_wakelock_release(gxp);
+ client->has_block_wakelock = false;
+ }
+
+ up_write(&client->semaphore);
+
+ return ret;
+}
+
+static int gxp_release_wake_lock(struct gxp_client *client, __u32 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ u32 wakelock_components;
+ int ret = 0;
+
+ if (copy_from_user(&wakelock_components, argp,
+ sizeof(wakelock_components)))
+ return -EFAULT;
+
+ down_write(&client->semaphore);
+
+ if (wakelock_components & WAKELOCK_VIRTUAL_DEVICE) {
+ if (!client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "Client must hold a VIRTUAL_DEVICE wakelock to release one\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ down_write(&gxp->vd_semaphore);
+ gxp_vd_stop(client->vd);
+ up_write(&gxp->vd_semaphore);
+
+ client->has_vd_wakelock = false;
+ }
+
+ if (wakelock_components & WAKELOCK_BLOCK) {
+ if (client->has_vd_wakelock) {
+ dev_err(gxp->dev,
+ "Client cannot release BLOCK wakelock while holding a VD wakelock\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (!client->has_block_wakelock) {
+ dev_err(gxp->dev,
+ "Client must hold a BLOCK wakelock to release one\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ gxp_wakelock_release(gxp);
+
+ client->has_block_wakelock = false;
+ }
+
+out:
+ up_write(&client->semaphore);
+
+ return ret;
+}
+
static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
{
struct gxp_client *client = file->private_data;
@@ -875,6 +1093,12 @@ static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
case GXP_READ_GLOBAL_COUNTER:
ret = gxp_read_global_counter(client, argp);
break;
+ case GXP_ACQUIRE_WAKE_LOCK:
+ ret = gxp_acquire_wake_lock(client, argp);
+ break;
+ case GXP_RELEASE_WAKE_LOCK:
+ ret = gxp_release_wake_lock(client, argp);
+ break;
default:
ret = -ENOTTY; /* unknown command */
}
diff --git a/gxp-vd.c b/gxp-vd.c
index bc5a6e3..7108797 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -10,7 +10,6 @@
#include <linux/slab.h>
#include <linux/types.h>
-#include "gxp-dma.h"
#include "gxp-firmware.h"
#include "gxp-firmware-data.h"
#include "gxp-internal.h"
@@ -25,7 +24,7 @@ int gxp_vd_init(struct gxp_dev *gxp)
/* All cores start as free */
for (core = 0; core < GXP_NUM_CORES; core++)
- gxp->core_to_client[core] = NULL;
+ gxp->core_to_vd[core] = NULL;
ret = gxp_fw_init(gxp);
@@ -41,65 +40,64 @@ void gxp_vd_destroy(struct gxp_dev *gxp)
up_write(&gxp->vd_semaphore);
}
-/* Caller must hold gxp->vd_semaphore for writing */
-static void gxp_vd_release(struct gxp_client *client)
+struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp, u16 requested_cores)
{
- uint core;
- struct gxp_dev *gxp = client->gxp;
+ struct gxp_virtual_device *vd;
+
+ /* Assumes 0 < requested_cores <= GXP_NUM_CORES */
+ if (requested_cores == 0 || requested_cores > GXP_NUM_CORES)
+ return ERR_PTR(-EINVAL);
+
+ vd = kzalloc(sizeof(*vd), GFP_KERNEL);
+ if (!vd)
+ return ERR_PTR(-ENOMEM);
+
+ vd->gxp = gxp;
+ vd->num_cores = requested_cores;
/*
- * Put all cores in the VD into reset so they can not wake each other up
+ * TODO(b/209083969) Initialize VD aux domain here to support VD
+ * suspend/resume and mapping without a VIRTUAL_DEVICE wakelock.
*/
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_client[core] == client) {
- gxp_write_32_core(
- gxp, core, GXP_REG_ETM_PWRCTL,
- 1 << GXP_REG_ETM_PWRCTL_CORE_RESET_SHIFT);
- }
- }
- for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_client[core] == client) {
- gxp->core_to_client[core] = NULL;
- gxp_firmware_stop(gxp, core);
- }
- }
- if (client->app) {
- gxp_fw_data_destroy_app(gxp, client->app);
- client->app = NULL;
- }
+ return vd;
+}
+
+void gxp_vd_release(struct gxp_virtual_device *vd)
+{
+ /*
+ * TODO(b/209083969) Cleanup VD aux domain once it's created in
+ * gxp_vd_allocate().
+ */
+
+ kfree(vd);
}
/* Caller must hold gxp->vd_semaphore for writing */
-int gxp_vd_allocate(struct gxp_client *client, u16 requested_cores)
+int gxp_vd_start(struct gxp_virtual_device *vd)
{
- struct gxp_dev *gxp = client->gxp;
+ struct gxp_dev *gxp = vd->gxp;
uint core;
- int available_cores = 0;
- int cores_remaining = requested_cores;
+ uint available_cores = 0;
+ uint cores_remaining = vd->num_cores;
uint core_list = 0;
int ret = 0;
- /* Assumes 0 < requested_cores <= GXP_NUM_CORES */
- WARN_ON(requested_cores == 0 || requested_cores > GXP_NUM_CORES);
- /* Assumes client has not called gxp_vd_allocate */
- WARN_ON(client->vd_allocated);
-
for (core = 0; core < GXP_NUM_CORES; core++) {
- if (gxp->core_to_client[core] == NULL) {
- if (available_cores < requested_cores)
+ if (gxp->core_to_vd[core] == NULL) {
+ if (available_cores < vd->num_cores)
core_list |= BIT(core);
available_cores++;
}
}
- if (available_cores < requested_cores) {
- dev_err(gxp->dev, "Insufficient available cores. Available: %d. Requested: %u\n",
- available_cores, requested_cores);
+ if (available_cores < vd->num_cores) {
+ dev_err(gxp->dev, "Insufficient available cores. Available: %u. Requested: %u\n",
+ available_cores, vd->num_cores);
return -EBUSY;
}
- client->app = gxp_fw_data_create_app(gxp, core_list);
+ vd->fw_app = gxp_fw_data_create_app(gxp, core_list);
for (core = 0; core < GXP_NUM_CORES; core++) {
if (cores_remaining == 0)
@@ -110,52 +108,81 @@ int gxp_vd_allocate(struct gxp_client *client, u16 requested_cores)
if (ret) {
dev_err(gxp->dev, "Failed to run firmware on core %u\n",
core);
- goto out_vd_release;
+ goto out_vd_stop;
}
- gxp->core_to_client[core] = client;
+ gxp->core_to_vd[core] = vd;
cores_remaining--;
}
}
if (cores_remaining != 0) {
- dev_err(gxp->dev, "Internal error: Failed to allocate %u requested cores. %d cores remaining\n",
- requested_cores, cores_remaining);
+ dev_err(gxp->dev,
+ "Internal error: Failed to start %u requested cores. %u cores remaining\n",
+ vd->num_cores, cores_remaining);
/*
* Should never reach here. Previously verified that enough
* cores are available.
*/
WARN_ON(true);
ret = -EIO;
- goto out_vd_release;
+ goto out_vd_stop;
}
- client->vd_allocated = true;
return ret;
-out_vd_release:
- gxp_vd_release(client);
+out_vd_stop:
+ gxp_vd_stop(vd);
return ret;
+
}
-int gxp_vd_virt_core_to_phys_core(struct gxp_client *client, u16 virt_core)
+/* Caller must hold gxp->vd_semaphore for writing */
+void gxp_vd_stop(struct gxp_virtual_device *vd)
{
- struct gxp_dev *gxp = client->gxp;
- uint phys_core;
- uint virt_core_index = 0;
+ struct gxp_dev *gxp = vd->gxp;
+ uint core;
- down_read(&gxp->vd_semaphore);
+ /*
+ * Put all cores in the VD into reset so they can not wake each other up
+ */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp->core_to_vd[core] == vd) {
+ gxp_write_32_core(
+ gxp, core, GXP_REG_ETM_PWRCTL,
+ 1 << GXP_REG_ETM_PWRCTL_CORE_RESET_SHIFT);
+ }
+ }
- if (!client->vd_allocated) {
- up_read(&gxp->vd_semaphore);
- dev_dbg(gxp->dev, "Client has not allocated a virtual device\n");
- return -EINVAL;
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (gxp->core_to_vd[core] == vd) {
+ gxp->core_to_vd[core] = NULL;
+ gxp_firmware_stop(gxp, core);
+ }
+ }
+
+ if (vd->fw_app) {
+ gxp_fw_data_destroy_app(gxp, vd->fw_app);
+ vd->fw_app = NULL;
}
+}
+
+/*
+ * Helper function for use in both `gxp_vd_virt_core_to_phys_core()` and
+ * `gxp_vd_virt_core_list_to_phys_core_list()`.
+ *
+ * Caller must have locked `gxp->vd_semaphore` for reading.
+ */
+static int virt_core_to_phys_core_locked(struct gxp_virtual_device *vd,
+ u16 virt_core)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ uint phys_core;
+ uint virt_core_index = 0;
for (phys_core = 0; phys_core < GXP_NUM_CORES; phys_core++) {
- if (gxp->core_to_client[phys_core] == client) {
+ if (gxp->core_to_vd[phys_core] == vd) {
if (virt_core_index == virt_core) {
/* Found virtual core */
- up_read(&gxp->vd_semaphore);
return phys_core;
}
@@ -163,18 +190,32 @@ int gxp_vd_virt_core_to_phys_core(struct gxp_client *client, u16 virt_core)
}
}
- up_read(&gxp->vd_semaphore);
dev_dbg(gxp->dev, "No mapping for virtual core %u\n", virt_core);
return -EINVAL;
}
-uint gxp_vd_virt_core_list_to_phys_core_list(struct gxp_client *client,
+int gxp_vd_virt_core_to_phys_core(struct gxp_virtual_device *vd, u16 virt_core)
+{
+ struct gxp_dev *gxp = vd->gxp;
+ int ret;
+
+ down_read(&gxp->vd_semaphore);
+ ret = virt_core_to_phys_core_locked(vd, virt_core);
+ up_read(&gxp->vd_semaphore);
+
+ return ret;
+}
+
+uint gxp_vd_virt_core_list_to_phys_core_list(struct gxp_virtual_device *vd,
u16 virt_core_list)
{
+ struct gxp_dev *gxp = vd->gxp;
uint phys_core_list = 0;
uint virt_core = 0;
int phys_core;
+ down_read(&gxp->vd_semaphore);
+
while (virt_core_list) {
/*
* Get the next virt core by finding the index of the first
@@ -187,48 +228,18 @@ uint gxp_vd_virt_core_list_to_phys_core_list(struct gxp_client *client,
virt_core = ffs(virt_core_list) - 1;
/* Any invalid virt cores invalidate the whole list */
- phys_core = gxp_vd_virt_core_to_phys_core(client, virt_core);
- if (phys_core < 0)
- return 0;
+ phys_core = virt_core_to_phys_core_locked(vd, virt_core);
+ if (phys_core < 0) {
+ phys_core_list = 0;
+ goto out;
+ }
phys_core_list |= BIT(phys_core);
virt_core_list &= ~BIT(virt_core);
}
- return phys_core_list;
-}
-
-struct gxp_client *gxp_client_create(struct gxp_dev *gxp)
-{
- struct gxp_client *client;
-
- client = kmalloc(sizeof(*client), GFP_KERNEL);
- if (!client)
- return ERR_PTR(-ENOMEM);
-
- client->gxp = gxp;
- client->vd_allocated = false;
- client->app = NULL;
- client->tpu_mbx_allocated = false;
- return client;
-}
-
-void gxp_client_destroy(struct gxp_client *client)
-{
- struct gxp_dev *gxp = client->gxp;
-
- down_write(&gxp->vd_semaphore);
-
-#if IS_ENABLED(CONFIG_ANDROID) && !IS_ENABLED(CONFIG_GXP_GEM5)
- /*
- * Unmap TPU buffers, if the mapping is already removed, this
- * is a no-op.
- */
- gxp_dma_unmap_tpu_buffer(gxp, client->mbx_desc);
-#endif // CONFIG_ANDROID && !CONFIG_GXP_GEM5
- gxp_vd_release(client);
-
- up_write(&gxp->vd_semaphore);
+out:
+ up_read(&gxp->vd_semaphore);
- kfree(client);
+ return phys_core_list;
}
diff --git a/gxp-vd.h b/gxp-vd.h
index 8180669..ac7ec5f 100644
--- a/gxp-vd.h
+++ b/gxp-vd.h
@@ -11,6 +11,11 @@
#include "gxp-internal.h"
+struct gxp_virtual_device {
+ struct gxp_dev *gxp;
+ uint num_cores;
+ void *fw_app;
+};
/*
* TODO(b/193180931) cleanup the relationship between the internal GXP modules.
@@ -22,36 +27,65 @@
* This is expected to be called once per driver lifecycle.
*/
int gxp_vd_init(struct gxp_dev *gxp);
+
/*
* Tears down the device management subsystem.
* This is expected to be called once per driver lifecycle.
*/
void gxp_vd_destroy(struct gxp_dev *gxp);
-/*
- * Allocates a virtual device on the requested cores for the specified client.
- * This will also load the FW on, and boot up, the requested cores.
+
+/**
+ * gxp_vd_allocate() - Allocate and initialize a struct gxp_virtual_device
+ * @gxp: The GXP device the virtual device will belong to
+ * @requested_cores: The number of cores the virtual device will have
+ *
+ * Return: The virtual address of the virtual device or an ERR_PTR on failure
+ * * -EINVAL - The number of requested cores was invalid
+ * * -ENOMEM - Unable to allocate the virtual device
+ */
+struct gxp_virtual_device *gxp_vd_allocate(struct gxp_dev *gxp, u16 requested_cores);
+
+/**
+ * gxp_vd_release() - Cleanup and free a struct gxp_virtual_device
+ * @vd: The virtual device to be released
+ *
+ * A virtual device must be stopped before it can be released.
+ */
+void gxp_vd_release(struct gxp_virtual_device *vd);
+
+/**
+ * gxp_vd_start() - Run a virtual device on physical cores
+ * @vd: The virtual device to start
+ *
+ * The caller must have locked gxp->vd_semaphore for writing.
+ *
+ * Return:
+ * * 0 - Success
+ * * -EBUSY - Insufficient physical cores were free to start @vd
*/
-int gxp_vd_allocate(struct gxp_client *client, u16 requested_cores);
+int gxp_vd_start(struct gxp_virtual_device *vd);
+
+/**
+ * gxp_vd_stop() - Stop a running virtual device and free up physical cores
+ * @vd: The virtual device to stop
+ *
+ * The caller must have locked gxp->vd_semaphore for writing.
+ */
+void gxp_vd_stop(struct gxp_virtual_device *vd);
+
/*
* Returns the physical core ID for the specified virtual_core belonging to
* this virtual device.
*/
-int gxp_vd_virt_core_to_phys_core(struct gxp_client *client, u16 virt_core);
+int gxp_vd_virt_core_to_phys_core(struct gxp_virtual_device *vd, u16 virt_core);
+
/*
* Converts a bitfield of virtual core IDs to a bitfield of physical core IDs.
*
* If the virtual list contains any invalid IDs, the entire physical ID list
* will be considered invalid and this function will return 0.
*/
-uint gxp_vd_virt_core_list_to_phys_core_list(struct gxp_client *client,
+uint gxp_vd_virt_core_list_to_phys_core_list(struct gxp_virtual_device *vd,
u16 virt_core_list);
-/*
- * Allocates and initializes a client container to represent a virtual device.
- */
-struct gxp_client *gxp_client_create(struct gxp_dev *gxp);
-/*
- * Frees up the client container representing a virtual device.
- */
-void gxp_client_destroy(struct gxp_client *client);
#endif /* __GXP_VD_H__ */
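
Lifecycle note: taken together, the kernel-doc above implies the following allocate/start/stop/release sequence. A hedged sketch (example_run() is hypothetical; the debugfs gxp_firmware_run_set() path earlier in this patch follows the same order):

	static int example_run(struct gxp_dev *gxp)
	{
		struct gxp_virtual_device *vd;
		int ret;

		vd = gxp_vd_allocate(gxp, GXP_NUM_CORES);
		if (IS_ERR(vd))
			return PTR_ERR(vd);

		down_write(&gxp->vd_semaphore);
		ret = gxp_vd_start(vd);		/* boots FW on free cores */
		up_write(&gxp->vd_semaphore);
		if (ret)
			goto out_release;

		/* ... use the virtual device ... */

		down_write(&gxp->vd_semaphore);
		gxp_vd_stop(vd);		/* must stop before release */
		up_write(&gxp->vd_semaphore);
	out_release:
		gxp_vd_release(vd);
		return ret;
	}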
diff --git a/gxp.h b/gxp.h
index 4861d52..868e88e 100644
--- a/gxp.h
+++ b/gxp.h
@@ -30,6 +30,15 @@
#define GXP_MAP_DMA_TO_DEVICE 1
#define GXP_MAP_DMA_FROM_DEVICE 2
+/*
+ * TODO(b/209083969) The following IOCTLs will no longer require the caller
+ * to hold a virtual device wakelock to call them once virtual device
+ * suspend/resume is implemented:
+ * - GXP_MAP_BUFFER
+ * - GXP_UNMAP_BUFFER
+ * - GXP_SYNC_BUFFER
+ */
+
struct gxp_map_ioctl {
/*
* Bitfield indicating which virtual cores to map the buffer for.
@@ -58,7 +67,11 @@ struct gxp_map_ioctl {
__u64 device_address; /* returned device address */
};
-/* Map host buffer. */
+/*
+ * Map host buffer.
+ *
+ * The client must hold a VIRTUAL_DEVICE wakelock.
+ */
#define GXP_MAP_BUFFER \
_IOWR(GXP_IOCTL_BASE, 0, struct gxp_map_ioctl)
@@ -68,6 +81,8 @@ struct gxp_map_ioctl {
* Only the @device_address field will be used. Other fields will be fetched
* from the kernel's internal records. It is recommended to use the argument
* that was passed in GXP_MAP_BUFFER to un-map the buffer.
+ *
+ * The client must hold a VIRTUAL_DEVICE wakelock.
*/
#define GXP_UNMAP_BUFFER \
_IOW(GXP_IOCTL_BASE, 1, struct gxp_map_ioctl)
@@ -105,6 +120,8 @@ struct gxp_sync_ioctl {
/*
* Sync buffer previously mapped by GXP_MAP_BUFFER.
*
+ * The client must hold a VIRTUAL_DEVICE wakelock.
+ *
* EINVAL: If a mapping for @device_address is not found.
* EINVAL: If @size equals 0.
* EINVAL: If @offset plus @size exceeds the mapping size.
@@ -143,7 +160,11 @@ struct gxp_mailbox_command_ioctl {
__u32 flags;
};
-/* Push element to the mailbox commmand queue. */
+/*
+ * Push element to the mailbox command queue.
+ *
+ * The client must hold a VIRTUAL_DEVICE wakelock.
+ */
#define GXP_MAILBOX_COMMAND \
_IOW(GXP_IOCTL_BASE, 3, struct gxp_mailbox_command_ioctl)
@@ -182,6 +203,8 @@ struct gxp_mailbox_response_ioctl {
/*
* Pop element from the mailbox response queue. Blocks until mailbox response
* is available.
+ *
+ * The client must hold a VIRTUAL_DEVICE wakelock.
*/
#define GXP_MAILBOX_RESPONSE \
_IOWR(GXP_IOCTL_BASE, 4, struct gxp_mailbox_response_ioctl)
@@ -418,4 +441,119 @@ struct gxp_register_telemetry_eventfd_ioctl {
*/
#define GXP_READ_GLOBAL_COUNTER _IOR(GXP_IOCTL_BASE, 17, __u64)
+/*
+ * Components for which a client may hold a wakelock.
+ * Acquired by passing these values as `components_to_wake` in
+ * `struct gxp_acquire_wakelock_ioctl` to GXP_ACQUIRE_WAKELOCK and released by
+ * passing these values directly as the argument to GXP_RELEASE_WAKELOCK.
+ *
+ * Multiple wakelocks can be acquired or released at once by passing multiple
+ * components, ORed together.
+ */
+#define WAKELOCK_BLOCK (1 << 0)
+#define WAKELOCK_VIRTUAL_DEVICE (1 << 1)
+
+/*
+ * DSP subsystem Power state values for use as `gxp_power_state` in
+ * `struct gxp_acquire_wakelock_ioctl`
+ */
+#define GXP_POWER_STATE_OFF 0
+#define GXP_POWER_STATE_UUD 1
+#define GXP_POWER_STATE_SUD 2
+#define GXP_POWER_STATE_UD 3
+#define GXP_POWER_STATE_NOM 4
+
+/*
+ * Memory interface power state values for use as `memory_power_state` in
+ * `struct gxp_acquire_wakelock_ioctl`.
+ */
+#define MEMORY_POWER_STATE_UNDEFINED 0
+#define MEMORY_POWER_STATE_MIN 1
+#define MEMORY_POWER_STATE_VERY_LOW 2
+#define MEMORY_POWER_STATE_LOW 3
+#define MEMORY_POWER_STATE_HIGH 4
+#define MEMORY_POWER_STATE_VERY_HIGH 5
+#define MEMORY_POWER_STATE_MAX 6
+
+struct gxp_acquire_wakelock_ioctl {
+ /*
+ * The components for which a wakelock will be acquired.
+ * Should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
+ * bitwise OR of both.
+ *
+ * A VIRTUAL_DEVICE wakelock cannot be acquired until the client has
+ * allocated a virtual device. To acquire a VIRTUAL_DEVICE wakelock, a
+ * client must already have acquired a BLOCK wakelock or acquire both
+ * in the same call.
+ */
+ __u32 components_to_wake;
+ /*
+ * Minimum power state to operate the entire DSP subsystem at until
+ * the wakelock is released. One of the GXP_POWER_STATE_* defines
+ * from above.
+ *
+ * `GXP_POWER_STATE_OFF` is not a valid value when acquiring a
+ * wakelock.
+ */
+ __u32 gxp_power_state;
+ /*
+ * Memory interface power state to request from the system so long as
+ * the wakelock is held. One of the MEMORY_POWER_STATE_* defines from
+ * above.
+ *
+ * If `MEMORY_POWER_STATE_UNDEFINED` is passed, no request to change
+ * the memory interface power state will be made.
+ */
+ __u32 memory_power_state;
+ /*
+ * How long to wait, in microseconds, before returning if insufficient
+ * physical cores are available when attempting to acquire a
+ * VIRTUAL_DEVICE wakelock. A value of 0 indicates that the IOCTL
+ * should not wait at all if cores are not available.
+ */
+ __u32 vd_timeout_us;
+};
+
+/*
+ * Acquire a wakelock and request minimum power states for the DSP subsystem
+ * and the memory interface.
+ *
+ * Upon a successful return, the specified components will be powered on and if
+ * they were not already running at the specified or higher power states,
+ * requests will have been sent to transition both the DSP subsystem and
+ * memory interface to the specified states.
+ *
+ * If the same client invokes this IOCTL for the same component more than once
+ * without a corresponding call to `GXP_RELEASE_WAKE_LOCK` in between, the
+ * second call will update requested power states, but have no other effects.
+ * No additional call to `GXP_RELEASE_WAKE_LOCK` will be required.
+ *
+ * If a client attempts to acquire a VIRTUAL_DEVICE wakelock and there are
+ * insufficient physical cores available, the driver will wait up to
+ * `vd_timeout_us` microseconds, then return -EBUSY if sufficient cores were
+ * never made available. In this case, if both BLOCK and VIRTUAL_DEVICE
+ * wakelocks were being requested, neither will have been acquired.
+ */
+#define GXP_ACQUIRE_WAKE_LOCK \
+ _IOW(GXP_IOCTL_BASE, 18, struct gxp_acquire_wakelock_ioctl)
+
+/*
+ * Release a wakelock acquired via `GXP_ACQUIRE_WAKE_LOCK`.
+ *
+ * The argument should be one of WAKELOCK_BLOCK or WAKELOCK_VIRTUAL_DEVICE, or a
+ * bitwise OR of both.
+ *
+ * Upon releasing a VIRTUAL_DEVICE wakelock, a client's virtual device will be
+ * removed from physical cores. At that point the cores may be reallocated to
+ * another client or powered down.
+ *
+ * If no clients hold a BLOCK wakelock, the entire DSP subsystem may be powered
+ * down. If a client attempts to release a BLOCK wakelock while still holding
+ * a VIRTUAL_DEVICE wakelock, this IOCTL will return -EBUSY.
+ *
+ * If a client attempts to release a wakelock it does not hold, this IOCTL will
+ * return -ENODEV.
+ */
+#define GXP_RELEASE_WAKE_LOCK _IOW(GXP_IOCTL_BASE, 19, __u32)
+
#endif /* __GXP_H__ */
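
Usage note: a hedged userspace sketch of the two new IOCTLs (example_wakelock_cycle() is hypothetical, and the virtual device is assumed to have been allocated via the driver's allocate-VD IOCTL, which is outside this hunk; error handling elided):

	#include <sys/ioctl.h>
	#include "gxp.h"

	int example_wakelock_cycle(int fd)	/* fd: an open GXP device node */
	{
		__u32 release = WAKELOCK_VIRTUAL_DEVICE | WAKELOCK_BLOCK;
		struct gxp_acquire_wakelock_ioctl req = {
			.components_to_wake = WAKELOCK_BLOCK | WAKELOCK_VIRTUAL_DEVICE,
			.gxp_power_state = GXP_POWER_STATE_UUD,
			.memory_power_state = MEMORY_POWER_STATE_UNDEFINED,
			.vd_timeout_us = 0,	/* fail fast if no cores free */
		};

		/* Assumes a virtual device was already allocated on fd. */
		if (ioctl(fd, GXP_ACQUIRE_WAKE_LOCK, &req))
			return -1;

		/* ... run workloads while the wakelocks are held ... */

		/* The VD wakelock must be released before (or with) BLOCK. */
		return ioctl(fd, GXP_RELEASE_WAKE_LOCK, &release);
	}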