author     Neela Chithirala <chithiralan@google.com>   2021-11-19 02:37:22 +0000
committer  Neela Chithirala <chithiralan@google.com>   2021-11-19 07:37:45 +0000
commit     e14069f1739b05c7a7f60ae73c8ce14b91ef12e0 (patch)
tree       49b54f216dc0556c514ce692e13f3bfa53d442dc
parent     de16475969ca7dfaecf7144ece32b929cf193861 (diff)
download   gs201-e14069f1739b05c7a7f60ae73c8ce14b91ef12e0.tar.gz
Merge branch 'gs201-release' to android13-gs-pixel-5.10
* gs201-release:
  gxp: Initial commit for PM interface
  gxp: map and unmap TPU mailbox queues buffer
  gxp: unittests: add a gxp-debugfs-test.c
  gxp: unittests: add a gxp-platform-test.c
  gxp: unittests: add a gxp-vd-test.c
  gxp: unittests: add a gxp-firmware-test.c
  gxp: Update the logging/tracing device/host shared structures.
  gxp: Add ability to mmap telemetry buffers
  gxp: Move ETM trace_data off the stack
  gxp: Add IOCTL for configuring ETM registers
  gxp: Update copyright to "Google LLC"
  gxp: Remove NEED_SG_DMA_LENGTH from Kconfig
  gxp: unittests: fix kunit path in kokoro
  gxp: unittests: let LPM be always active
  gxp: unittests: add a fake firmware loader
  gxp: unittests: support device tree in unit tests
  gxp: unittests: use kokoro to execute tests

Signed-off-by: Neela Chithirala <chithiralan@google.com>
Change-Id: I8fa8dfaaa70d2a40503ff3bffe636380b69e443c
-rw-r--r--  Makefile                      12
-rw-r--r--  gxp-debugfs.c                 68
-rw-r--r--  gxp-dma-iommu.c               69
-rw-r--r--  gxp-dma-rmem.c                14
-rw-r--r--  gxp-dma.h                     26
-rw-r--r--  gxp-firmware-data.c           50
-rw-r--r--  gxp-firmware-data.h           19
-rw-r--r--  gxp-firmware.c                 2
-rw-r--r--  gxp-host-device-structs.h     39
-rw-r--r--  gxp-internal.h                13
-rw-r--r--  gxp-lpm.c                     30
-rw-r--r--  gxp-lpm.h                     15
-rw-r--r--  gxp-mailbox.c                  2
-rw-r--r--  gxp-platform.c               390
-rw-r--r--  gxp-pm.c                     283
-rw-r--r--  gxp-pm.h                     190
-rw-r--r--  gxp-telemetry.c              384
-rw-r--r--  gxp-telemetry.h               80
-rw-r--r--  gxp-tmp.h                     13
-rw-r--r--  gxp-vd.c                      12
-rw-r--r--  gxp.h                        173
21 files changed, 1815 insertions, 69 deletions
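
As a rough illustration of how the new telemetry pieces fit together from user space (not part of this patch): the sketch below assumes a four-core GXP, a hypothetical /dev/gxp device node, a 64 KiB buffer per core, and that GXP_MMAP_LOG_BUFFER_OFFSET, GXP_ENABLE_TELEMETRY, GXP_DISABLE_TELEMETRY and GXP_TELEMETRY_TYPE_LOGGING all come from the updated gxp.h UAPI header.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include "gxp.h"                      /* driver UAPI header (ioctl/mmap constants) */

#define GXP_CORE_COUNT 4              /* assumption: NUM_CORES is 4 on this platform */

int main(void)
{
	size_t per_core = 16 * 4096;                /* page-aligned size per core */
	size_t total = per_core * GXP_CORE_COUNT;
	uint8_t type = GXP_TELEMETRY_TYPE_LOGGING;
	int fd = open("/dev/gxp", O_RDWR);          /* hypothetical device node */
	uint8_t *buf;

	if (fd < 0)
		return 1;

	/* Allocate and map one logging buffer per core (gxp_mmap -> gxp_telemetry_mmap_buffers). */
	buf = mmap(NULL, total, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
		   GXP_MMAP_LOG_BUFFER_OFFSET);
	if (buf == MAP_FAILED)
		return 1;

	/* Publish the buffer IOVAs to firmware and start logging. */
	if (ioctl(fd, GXP_ENABLE_TELEMETRY, &type))
		return 1;

	/*
	 * remap_telemetry_buffers() lays the buffers out back to back, so
	 * core i's data starts at offset i * per_core of the mapping.
	 */
	printf("core0 first word: %u\n", *(volatile uint32_t *)buf);

	ioctl(fd, GXP_DISABLE_TELEMETRY, &type);
	munmap(buf, total);
	close(fd);
	return 0;
}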
diff --git a/Makefile b/Makefile
index 77db0fb..5a4b17e 100644
--- a/Makefile
+++ b/Makefile
@@ -17,6 +17,8 @@ gxp-objs += \
gxp-mapping.o \
gxp-platform.o \
gxp-range-alloc.o \
+ gxp-pm.o \
+ gxp-telemetry.o \
gxp-vd.o
KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
@@ -55,5 +57,15 @@ ccflags-y += -DCONFIG_GXP_$(GXP_PLATFORM)
KBUILD_OPTIONS += CONFIG_GXP=m
+ifdef CONFIG_GXP_TEST
+subdir-ccflags-y += -Wall -Werror
+obj-y += unittests/
+include $(srctree)/drivers/gxp/unittests/Makefile.include
+$(call include_test_path, $(gxp-objs))
+endif
+
+# Access TPU driver's exported symbols.
+KBUILD_EXTRA_SYMBOLS += ../google-modules/edgetpu/janeiro/drivers/edgetpu/Module.symvers
+
modules modules_install clean:
$(MAKE) -C $(KERNEL_SRC) M=$(M) W=1 $(KBUILD_OPTIONS) $(@)
diff --git a/gxp-debugfs.c b/gxp-debugfs.c
index e8991ec..88b3190 100644
--- a/gxp-debugfs.c
+++ b/gxp-debugfs.c
@@ -11,8 +11,9 @@
#include "gxp-firmware.h"
#include "gxp-firmware-data.h"
#include "gxp-internal.h"
-#include "gxp-lpm.h"
+#include "gxp-pm.h"
#include "gxp-mailbox.h"
+#include "gxp-telemetry.h"
#include "gxp-vd.h"
static int gxp_debugfs_lpm_test(void *data, u64 val)
@@ -165,7 +166,7 @@ static int gxp_blk_powerstate_set(void *data, u64 val)
int ret = 0;
if (val >= AUR_DVFS_MIN_STATE) {
- ret = gxp_blk_set_state(gxp, val);
+ ret = gxp_pm_blk_set_state_acpm(gxp, val);
} else {
ret = -EINVAL;
dev_err(gxp->dev, "Incorrect state %llu\n", val);
@@ -177,7 +178,7 @@ static int gxp_blk_powerstate_get(void *data, u64 *val)
{
struct gxp_dev *gxp = (struct gxp_dev *)data;
- *val = gxp_blk_get_state(gxp);
+ *val = gxp_pm_blk_get_state_acpm(gxp);
return 0;
}
@@ -191,6 +192,66 @@ static int gxp_debugfs_coredump(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(gxp_coredump_fops, NULL, gxp_debugfs_coredump,
"%llu\n");
+static int gxp_log_buff_set(void *data, u64 val)
+{
+ struct gxp_dev *gxp = (struct gxp_dev *)data;
+ int i;
+ u64 **buffers;
+ u64 *ptr;
+
+ mutex_lock(&gxp->telemetry_mgr->lock);
+
+ if (!gxp->telemetry_mgr->logging_buff_data) {
+ dev_err(gxp->dev, "%s: Logging buffer has not been created\n",
+ __func__);
+ mutex_unlock(&gxp->telemetry_mgr->lock);
+ return -ENODEV;
+ }
+
+ buffers = (u64 **)gxp->telemetry_mgr->logging_buff_data->buffers;
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ ptr = buffers[i];
+ *ptr = val;
+ }
+ dev_dbg(gxp->dev,
+ "%s: log buff first bytes: [0] = %llu, [1] = %llu, [2] = %llu, [3] = %llu (val=%llu)\n",
+ __func__, *buffers[0], *buffers[1], *buffers[2], *buffers[3],
+ val);
+
+ mutex_unlock(&gxp->telemetry_mgr->lock);
+
+ return 0;
+}
+
+static int gxp_log_buff_get(void *data, u64 *val)
+{
+ struct gxp_dev *gxp = (struct gxp_dev *)data;
+ u64 **buffers;
+
+ mutex_lock(&gxp->telemetry_mgr->lock);
+
+ if (!gxp->telemetry_mgr->logging_buff_data) {
+ dev_err(gxp->dev, "%s: Logging buffer has not been created\n",
+ __func__);
+ mutex_unlock(&gxp->telemetry_mgr->lock);
+ return -ENODEV;
+ }
+
+ buffers = (u64 **)gxp->telemetry_mgr->logging_buff_data->buffers;
+ dev_dbg(gxp->dev,
+ "%s: log buff first bytes: [0] = %llu, [1] = %llu, [2] = %llu, [3] = %llu\n",
+ __func__, *buffers[0], *buffers[1], *buffers[2], *buffers[3]);
+
+ *val = *buffers[0];
+
+ mutex_unlock(&gxp->telemetry_mgr->lock);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(gxp_log_buff_fops, gxp_log_buff_get, gxp_log_buff_set,
+ "%llu\n");
+
void gxp_create_debugfs(struct gxp_dev *gxp)
{
gxp->d_entry = debugfs_create_dir("gxp", NULL);
@@ -209,6 +270,7 @@ void gxp_create_debugfs(struct gxp_dev *gxp)
&gxp_blk_powerstate_fops);
debugfs_create_file("coredump", 0200, gxp->d_entry, gxp,
&gxp_coredump_fops);
+ debugfs_create_file("log", 0600, gxp->d_entry, gxp, &gxp_log_buff_fops);
}
void gxp_remove_debugfs(struct gxp_dev *gxp)
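
The new debugfs node can be poked directly for a quick smoke test of the logging-buffer plumbing; a minimal user-space sketch, assuming debugfs is mounted at /sys/kernel/debug and the logging buffers have already been created via mmap (otherwise both operations fail with -ENODEV):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Created by gxp_create_debugfs() as <debugfs>/gxp/log, mode 0600. */
	int fd = open("/sys/kernel/debug/gxp/log", O_RDWR);
	char val[32] = { 0 };

	if (fd < 0)
		return 1;

	/* gxp_log_buff_set(): stamps 42 into the first u64 of every core's buffer. */
	if (write(fd, "42\n", 3) < 0)
		perror("write");

	/* gxp_log_buff_get(): returns the first u64 of core 0's logging buffer. */
	if (pread(fd, val, sizeof(val) - 1, 0) > 0)
		printf("log[0] = %s", val);

	close(fd);
	return 0;
}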
diff --git a/gxp-dma-iommu.c b/gxp-dma-iommu.c
index a81d9b1..dfb6dcf 100644
--- a/gxp-dma-iommu.c
+++ b/gxp-dma-iommu.c
@@ -445,6 +445,75 @@ alloc_sgt_for_buffer(void *ptr, size_t size,
return sgt;
}
+#ifdef CONFIG_ANDROID
+int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, uint core_list,
+ struct edgetpu_ext_mailbox_info *mbx_info)
+{
+ struct gxp_dma_iommu_manager *mgr = container_of(
+ gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
+ uint orig_core_list = core_list;
+ u64 queue_iova;
+ int core;
+ int ret;
+ int i = 0;
+
+ while (core_list) {
+ phys_addr_t cmdq_pa = mbx_info->mailboxes[i].cmdq_pa;
+ phys_addr_t respq_pa = mbx_info->mailboxes[i++].respq_pa;
+
+ core = ffs(core_list) - 1;
+ core_list &= ~BIT(core);
+ queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
+ ret = iommu_map(mgr->core_domains[core], queue_iova,
+ cmdq_pa, mbx_info->cmdq_size, IOMMU_WRITE);
+ if (ret)
+ goto error;
+ ret = iommu_map(mgr->core_domains[core],
+ queue_iova + mbx_info->cmdq_size, respq_pa,
+ mbx_info->respq_size, IOMMU_READ);
+ if (ret) {
+ iommu_unmap(mgr->core_domains[core], queue_iova,
+ mbx_info->cmdq_size);
+ goto error;
+ }
+ }
+ return 0;
+
+error:
+ core_list ^= orig_core_list;
+ while (core_list) {
+ core = ffs(core_list) - 1;
+ core_list &= ~BIT(core);
+ queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
+ iommu_unmap(mgr->core_domains[core], queue_iova,
+ mbx_info->cmdq_size);
+ iommu_unmap(mgr->core_domains[core], queue_iova +
+ mbx_info->cmdq_size, mbx_info->respq_size);
+ }
+ return ret;
+}
+
+void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
+ struct gxp_tpu_mbx_desc mbx_desc)
+{
+ struct gxp_dma_iommu_manager *mgr = container_of(
+ gxp->dma_mgr, struct gxp_dma_iommu_manager, dma_mgr);
+ uint core_list = mbx_desc.phys_core_list;
+ u64 queue_iova;
+ int core;
+
+ while (core_list) {
+ core = ffs(core_list) - 1;
+ core_list &= ~BIT(core);
+ queue_iova = GXP_IOVA_TPU_MBX_BUFFER(core);
+ iommu_unmap(mgr->core_domains[core], queue_iova,
+ mbx_desc.cmdq_size);
+ iommu_unmap(mgr->core_domains[core], queue_iova +
+ mbx_desc.cmdq_size, mbx_desc.respq_size);
+ }
+}
+#endif // CONFIG_ANDROID
+
void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
uint gxp_dma_flags)
diff --git a/gxp-dma-rmem.c b/gxp-dma-rmem.c
index 4e1c2ef..504940d 100644
--- a/gxp-dma-rmem.c
+++ b/gxp-dma-rmem.c
@@ -181,6 +181,20 @@ void gxp_dma_unmap_resources(struct gxp_dev *gxp)
/* no mappings to undo */
}
+#ifdef CONFIG_ANDROID
+int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, uint core_list,
+ struct edgetpu_ext_mailbox_info *mbx_info)
+{
+ return -ENODEV;
+}
+
+void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
+ struct gxp_tpu_mbx_desc mbx_desc)
+{
+ /* no mappings to undo */
+}
+#endif // CONFIG_ANDROID
+
void *gxp_dma_alloc_coherent(struct gxp_dev *gxp, uint core_list, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
uint gxp_dma_flags)
diff --git a/gxp-dma.h b/gxp-dma.h
index 37692af..d3dd81e 100644
--- a/gxp-dma.h
+++ b/gxp-dma.h
@@ -10,6 +10,9 @@
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>
+#ifdef CONFIG_ANDROID
+#include <soc/google/tpu-ext.h>
+#endif
#include "gxp-internal.h"
@@ -74,6 +77,29 @@ int gxp_dma_map_resources(struct gxp_dev *gxp);
*/
void gxp_dma_unmap_resources(struct gxp_dev *gxp);
+#ifdef CONFIG_ANDROID
+/**
+ * gxp_dma_map_tpu_buffer() - Map the tpu mbx queue buffers with fixed IOVAs
+ * @gxp: The GXP device to setup the mappings for
+ * @core_list: A bitfield enumerating the physical cores the mapping is for
+ * @mbx_info: Structure holding TPU-DSP mailbox queue buffer information
+ *
+ * Return:
+ * * 0 - Mappings created successfully
+ * * -EIO - Failed to create the mappings
+ */
+int gxp_dma_map_tpu_buffer(struct gxp_dev *gxp, uint core_list,
+ struct edgetpu_ext_mailbox_info *mbx_info);
+
+/**
+ * gxp_dma_unmap_tpu_buffer() - Unmap IOVAs mapped by gxp_dma_map_tpu_buffer()
+ * @gxp: The GXP device that was passed to gxp_dma_map_tpu_buffer()
+ * @mbx_desc: Structure holding info for already mapped TPU-DSP mailboxes.
+ */
+void gxp_dma_unmap_tpu_buffer(struct gxp_dev *gxp,
+ struct gxp_tpu_mbx_desc mbx_desc);
+#endif // CONFIG_ANDROID
+
/**
* gxp_dma_alloc_coherent() - Allocate and map a coherent buffer for a GXP core
* @gxp: The GXP device to map the allocated buffer for
diff --git a/gxp-firmware-data.c b/gxp-firmware-data.c
index 2af20e6..f782fcd 100644
--- a/gxp-firmware-data.c
+++ b/gxp-firmware-data.c
@@ -9,6 +9,7 @@
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
+#include "gxp.h"
#include "gxp-firmware-data.h"
#include "gxp-host-device-structs.h"
#include "gxp-internal.h"
@@ -94,7 +95,7 @@ struct gxp_fw_data_manager {
struct fw_memory_allocator *allocator;
struct fw_memory sys_desc_mem;
struct fw_memory wdog_mem;
- struct fw_memory logging_tracing_mem;
+ struct fw_memory telemetry_mem;
};
/* A container holding information for a single GXP application. */
@@ -272,22 +273,22 @@ static struct fw_memory init_watchdog(struct gxp_fw_data_manager *mgr)
return mem;
}
-static struct fw_memory init_logging_tracing(struct gxp_fw_data_manager *mgr)
+static struct fw_memory init_telemetry(struct gxp_fw_data_manager *mgr)
{
- struct gxp_logging_tracing_descriptor *lt_region;
+ struct gxp_telemetry_descriptor *tel_region;
struct fw_memory mem;
- mem_alloc_allocate(mgr->allocator, &mem, sizeof(*lt_region),
- __alignof__(struct gxp_logging_tracing_descriptor));
+ mem_alloc_allocate(mgr->allocator, &mem, sizeof(*tel_region),
+ __alignof__(struct gxp_telemetry_descriptor));
- lt_region = mem.host_addr;
+ tel_region = mem.host_addr;
/*
- * Logging and tracing is disabled for now.
+ * Telemetry is disabled for now.
* Subsuequent calls to the FW data module can be used to populate or
* depopulate the descriptor pointers on demand.
*/
- memset(lt_region, 0x00, sizeof(*lt_region));
+ memset(tel_region, 0x00, sizeof(*tel_region));
return mem;
}
@@ -595,10 +596,9 @@ int gxp_fw_data_init(struct gxp_dev *gxp)
mgr->wdog_mem = init_watchdog(mgr);
mgr->system_desc->watchdog_dev_addr = mgr->wdog_mem.device_addr;
- /* Allocate the descriptor for device-side logging and tracing */
- mgr->logging_tracing_mem = init_logging_tracing(mgr);
- mgr->system_desc->logging_tracing_dev_addr =
- mgr->logging_tracing_mem.device_addr;
+ /* Allocate the descriptor for device-side telemetry */
+ mgr->telemetry_mem = init_telemetry(mgr);
+ mgr->system_desc->telemetry_dev_addr = mgr->telemetry_mem.device_addr;
return res;
@@ -689,7 +689,7 @@ void gxp_fw_data_destroy(struct gxp_dev *gxp)
{
struct gxp_fw_data_manager *mgr = gxp->data_mgr;
- mem_alloc_free(mgr->allocator, &mgr->logging_tracing_mem);
+ mem_alloc_free(mgr->allocator, &mgr->telemetry_mem);
mem_alloc_free(mgr->allocator, &mgr->wdog_mem);
mem_alloc_free(mgr->allocator, &mgr->sys_desc_mem);
mem_alloc_destroy(mgr->allocator);
@@ -708,3 +708,27 @@ void gxp_fw_data_destroy(struct gxp_dev *gxp)
gxp->data_mgr = NULL;
}
}
+
+int gxp_fw_data_set_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
+ u32 *buffer_addrs,
+ u32 per_buffer_size)
+{
+ struct gxp_telemetry_descriptor *descriptor =
+ gxp->data_mgr->telemetry_mem.host_addr;
+ struct telemetry_descriptor *core_descriptors;
+ int i;
+
+ if (type == GXP_TELEMETRY_TYPE_LOGGING)
+ core_descriptors = descriptor->per_core_loggers;
+ else if (type == GXP_TELEMETRY_TYPE_TRACING)
+ core_descriptors = descriptor->per_core_tracers;
+ else
+ return -EINVAL;
+
+ for (i = 0; i < NUM_CORES; i++) {
+ core_descriptors[i].buffer_addr = buffer_addrs[i];
+ core_descriptors[i].buffer_size = per_buffer_size;
+ }
+
+ return 0;
+}
diff --git a/gxp-firmware-data.h b/gxp-firmware-data.h
index 9894971..1306303 100644
--- a/gxp-firmware-data.h
+++ b/gxp-firmware-data.h
@@ -53,4 +53,23 @@ void gxp_fw_data_destroy_app(struct gxp_dev *gxp, void *application);
*/
void gxp_fw_data_destroy(struct gxp_dev *gxp);
+/**
+ * gxp_fw_data_set_telemetry_descriptors() - Set new logging or tracing buffers
+ * for firmware to write to.
+ * @gxp: The GXP device to set buffer descriptors for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ * @buffer_addrs: An array containing the IOVA each physical core can access
+ * its logging or tracing buffer at
+ * @per_buffer_size: The size of each core's logging or tracing buffer in bytes
+ *
+ * `gxp_fw_data_init()` must have been called before this function.
+ *
+ * Return:
+ * 0 - Success
+ * -EINVAL - Invalid @type provided
+ */
+int gxp_fw_data_set_telemetry_descriptors(struct gxp_dev *gxp, u8 type,
+ u32 *buffer_addrs,
+ u32 per_buffer_size);
+
#endif /* __GXP_FIRMWARE_DATA_H__ */
diff --git a/gxp-firmware.c b/gxp-firmware.c
index 0c32749..89c0354 100644
--- a/gxp-firmware.c
+++ b/gxp-firmware.c
@@ -346,7 +346,7 @@ int gxp_fw_init(struct gxp_dev *gxp)
gxp->fwbufs[core].paddr =
r.start + (core * gxp->fwbufs[core].size);
/*
- * Firmware buffers are not mapped into kernal VA space until
+ * Firmware buffers are not mapped into kernel VA space until
* firmware is ready to be loaded.
*/
}
diff --git a/gxp-host-device-structs.h b/gxp-host-device-structs.h
index be29da2..52b5531 100644
--- a/gxp-host-device-structs.h
+++ b/gxp-host-device-structs.h
@@ -17,6 +17,14 @@
#define NUM_CORES 4
#define NUM_SYSTEM_SEMAPHORES 64
+/* Bit masks for the status fields in the telemetry structures. */
+/* The telemetry buffers have been setup by the host. */
+#define GXP_TELEMETRY_HOST_STATUS_ENABLED (1 << 0)
+/* The telemetry buffers are being used by the device. */
+#define GXP_TELEMETRY_DEVICE_STATUS_ENABLED (1 << 0)
+/* There was an attempt to use the buffers but their content was invalid. */
+#define GXP_TELEMETRY_DEVICE_STATUS_SANITY_CHECK_FAILED (1 << 1)
+
/* A structure describing the state of the doorbells on the system. */
struct gxp_doorbells_descriptor {
/* The app this descriptor belongs to. */
@@ -77,15 +85,26 @@ struct gxp_watchdog_descriptor {
uint32_t tripped;
};
-/* A structure describing the logging and tracing parameters and buffers. */
-struct gxp_logging_tracing_descriptor {
- /* A struct for describing the parameters for log and trace buffers */
- struct log_trace_descriptor {
+/*
+ * A structure describing the telemetry (logging and tracing) parameters and
+ * buffers.
+ */
+struct gxp_telemetry_descriptor {
+ /* A struct for describing the parameters for telemetry buffers */
+ struct telemetry_descriptor {
+ /*
+ * The telemetry status from the host's point of view. See the
+ * top of the file for the appropriate flags.
+ */
+ uint32_t host_status;
+ /*
+ * The telemetry status from the device point of view. See the
+ * top of the file for the appropriate flags.
+ */
+ uint32_t device_status;
/*
- * The device address for the buffer used for storing events. A
- * value of 0 indicates the corresponding buffer hasn't been
- * allocated, or has been deallocated.
- * The head and tail indeces are described inside the data
+ * The device address for the buffer used for storing events.
+ * The head and tail indices are described inside the data
* pointed to by `buffer_addr`.
*/
uint32_t buffer_addr;
@@ -200,8 +219,8 @@ struct gxp_system_descriptor {
uint32_t app_descriptor_dev_addr[NUM_CORES];
/* A device address for the watchdog descriptor. */
uint32_t watchdog_dev_addr;
- /* A device address for the logging/tracing descriptor */
- uint32_t logging_tracing_dev_addr;
+ /* A device address for the telemetry descriptor */
+ uint32_t telemetry_dev_addr;
};
/* A structure describing the metadata belonging to a specific application. */
diff --git a/gxp-internal.h b/gxp-internal.h
index 1893108..0034800 100644
--- a/gxp-internal.h
+++ b/gxp-internal.h
@@ -21,11 +21,19 @@
#include "gxp-config.h"
#include "gxp-tmp.h"
+/* Holds Client's TPU mailboxes info used during mapping */
+struct gxp_tpu_mbx_desc {
+ uint phys_core_list;
+ size_t cmdq_size, respq_size;
+};
+
/* Holds state belonging to a client */
struct gxp_client {
struct gxp_dev *gxp;
void *app;
bool vd_allocated;
+ bool tpu_mbx_allocated;
+ struct gxp_tpu_mbx_desc mbx_desc;
};
/* ioremapped resource */
@@ -43,6 +51,7 @@ struct mailbox_resp_list {
/* Structure to hold TPU device info */
struct gxp_tpu_dev {
+ struct device *dev;
phys_addr_t mbx_paddr;
};
@@ -52,6 +61,8 @@ struct gxp_debug_dump_manager;
struct gxp_mapping_root;
struct gxp_dma_manager;
struct gxp_fw_data_manager;
+struct gxp_power_manager;
+struct gxp_telemetry_manager;
struct gxp_dev {
struct device *dev; /* platform bus device */
@@ -63,6 +74,7 @@ struct gxp_dev {
struct gxp_mapped_resource fwdatabuf; /* Shared FW data carveout */
struct gxp_mapped_resource coredumpbuf; /* core dump carveout */
struct gxp_mailbox_manager *mailbox_mgr;
+ struct gxp_power_manager *power_mgr;
/*
* TODO(b/182416287): This should be a rb_tree of lists keyed by
* virtual device. For now, keep an array of one list per physical core
@@ -79,6 +91,7 @@ struct gxp_dev {
struct gxp_dma_manager *dma_mgr;
struct gxp_fw_data_manager *data_mgr;
struct gxp_tpu_dev tpu_dev;
+ struct gxp_telemetry_manager *telemetry_mgr;
};
/* GXP device IO functions */
diff --git a/gxp-lpm.c b/gxp-lpm.c
index de0f74a..ff42320 100644
--- a/gxp-lpm.c
+++ b/gxp-lpm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * GXP power management interface.
+ * GXP local power management interface.
*
* Copyright (C) 2021 Google LLC
*/
@@ -36,7 +36,7 @@ static void enable_state(struct gxp_dev *gxp, uint psm, uint state)
lpm_write_32_psm(gxp, psm, offset, 0x1);
}
-static bool is_initialized(struct gxp_dev *gxp, uint psm)
+bool gxp_lpm_is_initialized(struct gxp_dev *gxp, uint psm)
{
u32 status = lpm_read_32_psm(gxp, psm, PSM_STATUS_OFFSET);
@@ -57,28 +57,6 @@ static uint get_state(struct gxp_dev *gxp, uint psm)
return status & PSM_CURR_STATE_MASK;
}
-int gxp_blk_set_state(struct gxp_dev *gxp, unsigned long state)
-{
- int ret = 0;
-
-#ifdef CONFIG_GXP_CLOUDRIPPER
- ret = exynos_acpm_set_rate(AUR_DVFS_DOMAIN, state);
- dev_dbg(gxp->dev, "%s: state %lu, ret %d\n", __func__, state, ret);
-#endif
- return ret;
-}
-
-int gxp_blk_get_state(struct gxp_dev *gxp)
-{
- int ret = 0;
-
-#ifdef CONFIG_GXP_CLOUDRIPPER
- ret = exynos_acpm_get_rate(AUR_DVFS_DOMAIN, AUR_DEBUG_CORE_FREQ);
- dev_dbg(gxp->dev, "%s: state %d\n", __func__, ret);
-#endif
- return ret;
-}
-
static int set_state_internal(struct gxp_dev *gxp, uint psm, uint target_state)
{
u32 val;
@@ -143,7 +121,7 @@ static int psm_enable(struct gxp_dev *gxp, uint psm)
int i = 10000;
/* Return early if LPM is already initialized */
- if (is_initialized(gxp, psm)) {
+ if (gxp_lpm_is_initialized(gxp, psm)) {
if (psm != LPM_TOP_PSM) {
/* Ensure core is in PS2 */
return set_state(gxp, psm, LPM_PG_W_RET_STATE);
@@ -228,7 +206,7 @@ int gxp_lpm_up(struct gxp_dev *gxp, uint core)
dev_notice(gxp->dev, "Enabling Core%u PSM...\n", core);
if (psm_enable(gxp, core)) {
dev_notice(gxp->dev, "Timed out!\n");
- return 0;
+ return -ETIMEDOUT;
}
dev_notice(gxp->dev, "Enabled\n");
diff --git a/gxp-lpm.h b/gxp-lpm.h
index 3a4fbb7..8ffbb0e 100644
--- a/gxp-lpm.h
+++ b/gxp-lpm.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * GXP power management interface.
+ * GXP local power management interface.
+ * Controlling Local Power Manager hardware.
*
* Copyright (C) 2020 Google LLC
*/
@@ -41,8 +42,6 @@ enum lpm_state {
#define AUR_DVFS_DEBUG_REQ (1 << 31)
#define AUR_DEBUG_CORE_FREQ (AUR_DVFS_DEBUG_REQ | (3 << 27))
-#define AUR_DVFS_MIN_STATE 178000
-
/*
* Initializes the power manager for the first time after block power up.
* The function needs to be called once after a block power up event.
@@ -62,14 +61,10 @@ int gxp_lpm_up(struct gxp_dev *gxp, uint core);
*/
void gxp_lpm_down(struct gxp_dev *gxp, uint core);
/*
- * Sets the block-level DVFS state. This function can be called at any point
- * after block power on.
- */
-int gxp_blk_set_state(struct gxp_dev *gxp, unsigned long state);
-/*
- * Returns the current DVFS state of the Aurora block.
+ * Return whether the specified PSM is initialized.
+ * PSM0-PSM3 are for core0-core3, PSM4 is the TOP LPM.
*/
-int gxp_blk_get_state(struct gxp_dev *gxp);
+bool gxp_lpm_is_initialized(struct gxp_dev *gxp, uint psm);
static inline u32 lpm_read_32(struct gxp_dev *gxp, uint reg_offset)
{
diff --git a/gxp-mailbox.c b/gxp-mailbox.c
index c262923..549a6e1 100644
--- a/gxp-mailbox.c
+++ b/gxp-mailbox.c
@@ -549,7 +549,7 @@ void gxp_mailbox_release(struct gxp_mailbox_manager *mgr,
if (!mailbox) {
dev_err(mgr->gxp->dev,
- "Attempt to release nonexistant mailbox\n");
+ "Attempt to release nonexistent mailbox\n");
return;
}
diff --git a/gxp-platform.c b/gxp-platform.c
index f8eaa5c..df7ab49 100644
--- a/gxp-platform.c
+++ b/gxp-platform.c
@@ -15,12 +15,16 @@
#include <linux/fs.h>
#include <linux/genalloc.h>
#include <linux/kthread.h>
+#include <linux/log2.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
+#ifdef CONFIG_ANDROID
+#include <soc/google/tpu-ext.h>
+#endif
#include "gxp.h"
#include "gxp-debug-dump.h"
@@ -32,6 +36,8 @@
#include "gxp-mailbox.h"
#include "gxp-mailbox-driver.h"
#include "gxp-mapping.h"
+#include "gxp-pm.h"
+#include "gxp-telemetry.h"
#include "gxp-vd.h"
#ifdef CONFIG_ANDROID
@@ -418,6 +424,319 @@ static int gxp_allocate_vd(struct gxp_client *client,
return ret;
}
+static int
+gxp_etm_trace_start_command(struct gxp_client *client,
+ struct gxp_etm_trace_start_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_etm_trace_start_ioctl ibuf;
+ int phys_core;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ ibuf.trace_ram_enable &= ETM_TRACE_LSB_MASK;
+ ibuf.atb_enable &= ETM_TRACE_LSB_MASK;
+
+ if (!ibuf.trace_ram_enable && !ibuf.atb_enable)
+ return -EINVAL;
+
+ if (!(ibuf.sync_msg_period == 0 ||
+ (ibuf.sync_msg_period <= ETM_TRACE_SYNC_MSG_PERIOD_MAX &&
+ ibuf.sync_msg_period >= ETM_TRACE_SYNC_MSG_PERIOD_MIN &&
+ is_power_of_2(ibuf.sync_msg_period))))
+ return -EINVAL;
+
+ if (ibuf.pc_match_mask_length > ETM_TRACE_PC_MATCH_MASK_LEN_MAX)
+ return -EINVAL;
+
+ phys_core = gxp_vd_virt_core_to_phys_core(client, ibuf.virtual_core_id);
+ if (phys_core < 0) {
+ dev_err(gxp->dev, "Trace start failed: Invalid virtual core id (%u)\n",
+ ibuf.virtual_core_id);
+ return -EINVAL;
+ }
+
+ /*
+ * TODO (b/185260919): Pass the etm trace configuration to system FW
+ * once communication channel between kernel and system FW is ready
+ * (b/185819530).
+ */
+
+ return 0;
+}
+
+static int gxp_etm_trace_sw_stop_command(struct gxp_client *client,
+ __u16 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ u16 virtual_core_id;
+ int phys_core;
+
+ if (copy_from_user(&virtual_core_id, argp, sizeof(virtual_core_id)))
+ return -EFAULT;
+
+
+ phys_core = gxp_vd_virt_core_to_phys_core(client, virtual_core_id);
+ if (phys_core < 0) {
+ dev_err(gxp->dev, "Trace stop via software trigger failed: Invalid virtual core id (%u)\n",
+ virtual_core_id);
+ return -EINVAL;
+ }
+
+ /*
+ * TODO (b/185260919): Pass the etm stop signal to system FW once
+ * communication channel between kernel and system FW is ready
+ * (b/185819530).
+ */
+
+ return 0;
+}
+
+static int gxp_etm_trace_cleanup_command(struct gxp_client *client,
+ __u16 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ u16 virtual_core_id;
+ int phys_core;
+
+ if (copy_from_user(&virtual_core_id, argp, sizeof(virtual_core_id)))
+ return -EFAULT;
+
+ phys_core = gxp_vd_virt_core_to_phys_core(client, virtual_core_id);
+ if (phys_core < 0) {
+ dev_err(gxp->dev, "Trace cleanup failed: Invalid virtual core id (%u)\n",
+ virtual_core_id);
+ return -EINVAL;
+ }
+
+ /*
+ * TODO (b/185260919): Pass the etm clean up signal to system FW once
+ * communication channel between kernel and system FW is ready
+ * (b/185819530).
+ */
+
+ return 0;
+}
+
+static int
+gxp_etm_get_trace_info_command(struct gxp_client *client,
+ struct gxp_etm_get_trace_info_ioctl __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_etm_get_trace_info_ioctl ibuf;
+ int phys_core;
+ u32 *trace_header;
+ u32 *trace_data;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ if (ibuf.type > 1)
+ return -EINVAL;
+
+ phys_core = gxp_vd_virt_core_to_phys_core(client, ibuf.virtual_core_id);
+ if (phys_core < 0) {
+ dev_err(gxp->dev, "Get trace info failed: Invalid virtual core id (%u)\n",
+ ibuf.virtual_core_id);
+ return -EINVAL;
+ }
+
+ trace_header = kzalloc(GXP_TRACE_HEADER_SIZE, GFP_KERNEL);
+ trace_data = kzalloc(GXP_TRACE_RAM_SIZE, GFP_KERNEL);
+
+ /*
+ * TODO (b/185260919): Get trace information from system FW once
+ * communication channel between kernel and system FW is ready
+ * (b/185819530).
+ */
+
+ if (copy_to_user((void __user *)ibuf.trace_header_addr, trace_header,
+ GXP_TRACE_HEADER_SIZE)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (ibuf.type == 1) {
+ if (copy_to_user((void __user *)ibuf.trace_data_addr,
+ trace_data, GXP_TRACE_RAM_SIZE)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+
+out:
+ kfree(trace_header);
+ kfree(trace_data);
+
+ return ret;
+}
+
+static int gxp_enable_telemetry(struct gxp_client *client,
+ __u8 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ __u8 type;
+
+ if (copy_from_user(&type, argp, sizeof(type)))
+ return -EFAULT;
+
+ if (type != GXP_TELEMETRY_TYPE_LOGGING &&
+ type != GXP_TELEMETRY_TYPE_TRACING)
+ return -EINVAL;
+
+ return gxp_telemetry_enable(gxp, type);
+}
+
+static int gxp_disable_telemetry(struct gxp_client *client, __u8 __user *argp)
+{
+ struct gxp_dev *gxp = client->gxp;
+ __u8 type;
+
+ if (copy_from_user(&type, argp, sizeof(type)))
+ return -EFAULT;
+
+ if (type != GXP_TELEMETRY_TYPE_LOGGING &&
+ type != GXP_TELEMETRY_TYPE_TRACING)
+ return -EINVAL;
+
+ return gxp_telemetry_disable(gxp, type);
+}
+
+static int gxp_map_tpu_mbx_queue(struct gxp_client *client,
+ struct gxp_tpu_mbx_queue_ioctl __user *argp)
+{
+#ifdef CONFIG_ANDROID
+ struct gxp_dev *gxp = client->gxp;
+ struct edgetpu_ext_mailbox_info *mbx_info;
+ struct gxp_tpu_mbx_queue_ioctl ibuf;
+ struct edgetpu_ext_client_info gxp_tpu_info;
+ u32 phys_core_list = 0;
+ u32 virtual_core_list;
+ u32 core_count;
+ int ret = 0;
+
+ if (!gxp->tpu_dev.mbx_paddr) {
+ dev_err(gxp->dev, "%s: TPU is not available for interop\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ virtual_core_list = ibuf.virtual_core_list;
+ core_count = hweight_long(virtual_core_list);
+ phys_core_list = gxp_vd_virt_core_list_to_phys_core_list(
+ client, virtual_core_list);
+ if (!phys_core_list) {
+ dev_err(gxp->dev, "%s: invalid virtual core list 0x%x\n",
+ __func__, virtual_core_list);
+ return -EINVAL;
+ }
+
+ mbx_info =
+ kmalloc(sizeof(struct edgetpu_ext_mailbox_info) + core_count *
+ sizeof(struct edgetpu_ext_mailbox_descriptor),
+ GFP_KERNEL);
+ if (!mbx_info)
+ return -ENOMEM;
+
+ mutex_lock(&gxp->vd_lock);
+
+ if (client->tpu_mbx_allocated) {
+ dev_err(gxp->dev, "%s: Mappings already exist for TPU mailboxes\n",
+ __func__);
+ ret = -EBUSY;
+ goto error;
+ }
+
+ gxp_tpu_info.tpu_fd = ibuf.tpu_fd;
+ gxp_tpu_info.mbox_map = phys_core_list;
+ gxp_tpu_info.attr = (struct edgetpu_mailbox_attr __user *)ibuf.attr_ptr;
+ ret = edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
+ EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
+ ALLOCATE_EXTERNAL_MAILBOX, &gxp_tpu_info,
+ mbx_info);
+ if (ret) {
+ dev_err(gxp->dev, "%s: Failed to allocate ext tpu mailboxes %d\n",
+ __func__, ret);
+ goto error;
+ }
+ /* Align queue size to page size for iommu map. */
+ mbx_info->cmdq_size = ALIGN(mbx_info->cmdq_size, PAGE_SIZE);
+ mbx_info->respq_size = ALIGN(mbx_info->respq_size, PAGE_SIZE);
+
+ ret = gxp_dma_map_tpu_buffer(gxp, phys_core_list, mbx_info);
+ if (ret) {
+ dev_err(gxp->dev, "%s: failed to map TPU mailbox buffer %d\n",
+ __func__, ret);
+ edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
+ EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
+ FREE_EXTERNAL_MAILBOX, &gxp_tpu_info,
+ NULL);
+ goto error;
+ }
+ client->mbx_desc.phys_core_list = phys_core_list;
+ client->mbx_desc.cmdq_size = mbx_info->cmdq_size;
+ client->mbx_desc.respq_size = mbx_info->respq_size;
+ client->tpu_mbx_allocated = true;
+
+error:
+ mutex_unlock(&gxp->vd_lock);
+
+ kfree(mbx_info);
+ return ret;
+#else
+ return -ENODEV;
+#endif
+}
+
+static int gxp_unmap_tpu_mbx_queue(struct gxp_client *client,
+ struct gxp_tpu_mbx_queue_ioctl __user *argp)
+{
+#ifdef CONFIG_ANDROID
+ struct gxp_dev *gxp = client->gxp;
+ struct gxp_tpu_mbx_queue_ioctl ibuf;
+ struct edgetpu_ext_client_info gxp_tpu_info;
+ int ret = 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ mutex_lock(&gxp->vd_lock);
+
+ if (!client->tpu_mbx_allocated) {
+ dev_err(gxp->dev, "%s: No mappings exist for TPU mailboxes\n",
+ __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ gxp_dma_unmap_tpu_buffer(gxp, client->mbx_desc);
+
+ gxp_tpu_info.tpu_fd = ibuf.tpu_fd;
+ ret = edgetpu_ext_driver_cmd(gxp->tpu_dev.dev,
+ EDGETPU_EXTERNAL_CLIENT_TYPE_DSP,
+ FREE_EXTERNAL_MAILBOX, &gxp_tpu_info,
+ NULL);
+ if (ret) {
+ dev_err(gxp->dev, "%s: Failed to free ext tpu mailboxes %d\n",
+ __func__, ret);
+ goto out;
+ }
+ client->tpu_mbx_allocated = false;
+
+out:
+ mutex_unlock(&gxp->vd_lock);
+
+ return ret;
+#else
+ return -ENODEV;
+#endif
+}
+
static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
{
struct gxp_client *client = file->private_data;
@@ -446,6 +765,30 @@ static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
case GXP_ALLOCATE_VIRTUAL_DEVICE:
ret = gxp_allocate_vd(client, argp);
break;
+ case GXP_ETM_TRACE_START_COMMAND:
+ ret = gxp_etm_trace_start_command(client, argp);
+ break;
+ case GXP_ETM_TRACE_SW_STOP_COMMAND:
+ ret = gxp_etm_trace_sw_stop_command(client, argp);
+ break;
+ case GXP_ETM_TRACE_CLEANUP_COMMAND:
+ ret = gxp_etm_trace_cleanup_command(client, argp);
+ break;
+ case GXP_ETM_GET_TRACE_INFO_COMMAND:
+ ret = gxp_etm_get_trace_info_command(client, argp);
+ break;
+ case GXP_ENABLE_TELEMETRY:
+ ret = gxp_enable_telemetry(client, argp);
+ break;
+ case GXP_DISABLE_TELEMETRY:
+ ret = gxp_disable_telemetry(client, argp);
+ break;
+ case GXP_MAP_TPU_MBX_QUEUE:
+ ret = gxp_map_tpu_mbx_queue(client, argp);
+ break;
+ case GXP_UNMAP_TPU_MBX_QUEUE:
+ ret = gxp_unmap_tpu_mbx_queue(client, argp);
+ break;
default:
ret = -ENOTTY; /* unknown command */
}
@@ -453,8 +796,31 @@ static long gxp_ioctl(struct file *file, uint cmd, ulong arg)
return ret;
}
+static int gxp_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct gxp_client *client = file->private_data;
+
+ if (!client)
+ return -ENODEV;
+
+ switch (vma->vm_pgoff << PAGE_SHIFT) {
+ case GXP_MMAP_LOG_BUFFER_OFFSET:
+ return gxp_telemetry_mmap_buffers(client->gxp,
+ GXP_TELEMETRY_TYPE_LOGGING,
+ vma);
+ case GXP_MMAP_TRACE_BUFFER_OFFSET:
+ return gxp_telemetry_mmap_buffers(client->gxp,
+ GXP_TELEMETRY_TYPE_TRACING,
+ vma);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct file_operations gxp_fops = {
.owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .mmap = gxp_mmap,
.open = gxp_open,
.release = gxp_release,
.unlocked_ioctl = gxp_ioctl,
@@ -467,6 +833,7 @@ static int gxp_platform_probe(struct platform_device *pdev)
struct resource *r;
phys_addr_t offset, base_addr;
struct device_node *np;
+ struct platform_device *tpu_pdev;
int ret;
int i __maybe_unused;
bool tpu_found __maybe_unused;
@@ -506,7 +873,7 @@ static int gxp_platform_probe(struct platform_device *pdev)
goto err;
}
-#ifdef CONFIG_GXP_CLOUDRIPPER
+#if defined(CONFIG_GXP_CLOUDRIPPER) && !defined(CONFIG_GXP_TEST)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret) {
@@ -541,6 +908,11 @@ static int gxp_platform_probe(struct platform_device *pdev)
dev_warn(dev, "No tpu-device in device tree\n");
tpu_found = false;
}
+ tpu_pdev = of_find_device_by_node(np);
+ if (!tpu_pdev) {
+ dev_err(dev, "TPU device not found\n");
+ tpu_found = false;
+ }
/* get tpu mailbox register base */
ret = of_property_read_u64_index(np, "reg", 0, &base_addr);
of_node_put(np);
@@ -556,6 +928,8 @@ static int gxp_platform_probe(struct platform_device *pdev)
tpu_found = false;
}
if (tpu_found) {
+ gxp->tpu_dev.dev = &tpu_pdev->dev;
+ get_device(gxp->tpu_dev.dev);
gxp->tpu_dev.mbx_paddr = base_addr + offset;
} else {
dev_warn(dev, "TPU will not be available for interop\n");
@@ -566,7 +940,7 @@ static int gxp_platform_probe(struct platform_device *pdev)
ret = gxp_dma_init(gxp);
if (ret) {
dev_err(dev, "Failed to initialize GXP DMA interface\n");
- goto err;
+ goto err_put_tpu_dev;
}
gxp->mailbox_mgr = gxp_mailbox_create_manager(gxp, GXP_NUM_CORES);
@@ -609,7 +983,9 @@ static int gxp_platform_probe(struct platform_device *pdev)
}
gxp_fw_data_init(gxp);
+ gxp_telemetry_init(gxp);
gxp_create_debugfs(gxp);
+ gxp_pm_init(gxp);
dev_dbg(dev, "Probe finished\n");
return 0;
@@ -620,6 +996,10 @@ err_debug_dump_exit:
gxp_debug_dump_exit(gxp);
err_dma_exit:
gxp_dma_exit(gxp);
+err_put_tpu_dev:
+#ifndef CONFIG_GXP_USE_SW_MAILBOX
+ put_device(gxp->tpu_dev.dev);
+#endif
err:
misc_deregister(&gxp->misc_dev);
devm_kfree(dev, (void *)gxp);
@@ -637,12 +1017,16 @@ static int gxp_platform_remove(struct platform_device *pdev)
gxp_vd_destroy(gxp);
gxp_dma_unmap_resources(gxp);
gxp_dma_exit(gxp);
+#ifndef CONFIG_GXP_USE_SW_MAILBOX
+ put_device(gxp->tpu_dev.dev);
+#endif
misc_deregister(&gxp->misc_dev);
#ifdef CONFIG_GXP_CLOUDRIPPER
// Request to power off BLK_AUR
- pm_runtime_put_sync(dev);
+ gxp_pm_blk_off(gxp);
pm_runtime_disable(dev);
+ gxp_pm_destroy(gxp);
#endif
devm_kfree(dev, (void *)gxp);
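
For reference, the new TPU mailbox interop ioctls would be driven from user space roughly as below. This is a sketch only: the gxp_tpu_mbx_queue_ioctl field names are taken from their use in gxp_map_tpu_mbx_queue(), while their exact types, the EdgeTPU device path, and the attribute handling are assumptions; the client is also expected to already hold a virtual device from GXP_ALLOCATE_VIRTUAL_DEVICE.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "gxp.h"                      /* driver UAPI header */

int map_tpu_queues(int gxp_fd, uint32_t virtual_core_list,
		   const void *tpu_mbox_attr)
{
	int tpu_fd = open("/dev/edgetpu", O_RDWR);   /* hypothetical TPU client fd */
	struct gxp_tpu_mbx_queue_ioctl req = {
		.tpu_fd = tpu_fd,
		.virtual_core_list = virtual_core_list,
		/* points at a struct edgetpu_mailbox_attr consumed by the TPU driver */
		.attr_ptr = (uint64_t)(uintptr_t)tpu_mbox_attr,
	};

	if (tpu_fd < 0)
		return -1;

	/* Allocates external TPU mailboxes and IOMMU-maps their queues per core. */
	if (ioctl(gxp_fd, GXP_MAP_TPU_MBX_QUEUE, &req)) {
		close(tpu_fd);
		return -1;
	}

	/* ... run the TPU<->DSP workload ... */

	/* Unmaps the queues and frees the external mailboxes. */
	ioctl(gxp_fd, GXP_UNMAP_TPU_MBX_QUEUE, &req);
	close(tpu_fd);
	return 0;
}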
diff --git a/gxp-pm.c b/gxp-pm.c
new file mode 100644
index 0000000..cd478fb
--- /dev/null
+++ b/gxp-pm.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GXP power management.
+ *
+ * Copyright (C) 2021 Google LLC
+ */
+
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_GXP_CLOUDRIPPER
+#include <linux/acpm_dvfs.h>
+#endif
+
+#include "gxp-bpm.h"
+#include "gxp-doorbell.h"
+#include "gxp-internal.h"
+#include "gxp-lpm.h"
+#include "gxp-pm.h"
+
+static struct gxp_pm_device_ops gxp_aur_ops = {
+ .pre_blk_powerup = NULL,
+ .post_blk_powerup = NULL,
+ .pre_blk_poweroff = NULL,
+ .post_blk_poweroff = NULL,
+};
+
+static int gxp_pm_blkpwr_up(struct gxp_dev *gxp)
+{
+ int ret = 0;
+
+#ifdef CONFIG_GXP_CLOUDRIPPER
+ ret = pm_runtime_get_sync(gxp->dev);
+ if (ret)
+ dev_err(gxp->dev, "%s: pm_runtime_get_sync returned %d\n",
+ __func__, ret);
+#endif
+ return ret;
+}
+
+static int gxp_pm_blkpwr_down(struct gxp_dev *gxp)
+{
+ int ret = 0;
+
+#ifdef CONFIG_GXP_CLOUDRIPPER
+ /*
+ * Need to put TOP LPM into active state before blk off
+ * b/189396709
+ */
+ lpm_write_32_psm(gxp, LPM_TOP_PSM, LPM_REG_ENABLE_STATE_1, 0x0);
+ lpm_write_32_psm(gxp, LPM_TOP_PSM, LPM_REG_ENABLE_STATE_2, 0x0);
+ ret = pm_runtime_put_sync(gxp->dev);
+ if (ret)
+ dev_err(gxp->dev, "%s: pm_runtime_put_sync returned %d\n",
+ __func__, ret);
+#endif
+ return ret;
+}
+
+int gxp_pm_blk_set_state_acpm(struct gxp_dev *gxp, unsigned long state)
+{
+ int ret = 0;
+
+#ifdef CONFIG_GXP_CLOUDRIPPER
+ ret = exynos_acpm_set_rate(AUR_DVFS_DOMAIN, state);
+ dev_dbg(gxp->dev, "%s: state %lu, ret %d\n", __func__, state, ret);
+#endif
+ return ret;
+}
+
+int gxp_pm_blk_get_state_acpm(struct gxp_dev *gxp)
+{
+ int ret = 0;
+
+#ifdef CONFIG_GXP_CLOUDRIPPER
+ ret = exynos_acpm_get_rate(AUR_DVFS_DOMAIN, AUR_DEBUG_CORE_FREQ);
+ dev_dbg(gxp->dev, "%s: state %d\n", __func__, ret);
+#endif
+ return ret;
+}
+
+int gxp_pm_blk_on(struct gxp_dev *gxp)
+{
+ int ret = 0;
+
+ if (WARN_ON(!gxp->power_mgr)) {
+ dev_err(gxp->dev, "%s: No PM found\n", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&gxp->power_mgr->pm_lock);
+ ret = gxp_pm_blkpwr_up(gxp);
+ if (!ret) {
+ gxp_pm_blk_set_state_acpm(gxp, AUR_INIT_DVFS_STATE);
+ gxp->power_mgr->curr_state = AUR_INIT_DVFS_STATE;
+ }
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+
+ return ret;
+}
+
+int gxp_pm_blk_off(struct gxp_dev *gxp)
+{
+ int ret = 0;
+
+ if (WARN_ON(!gxp->power_mgr)) {
+ dev_err(gxp->dev, "%s: No PM found\n", __func__);
+ return -ENODEV;
+ }
+ mutex_lock(&gxp->power_mgr->pm_lock);
+ if (refcount_read(&(gxp->power_mgr->blk_wake_ref))) {
+ dev_err(gxp->dev, "%s: Wake lock not released\n", __func__);
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+ return -EBUSY;
+ }
+
+ ret = gxp_pm_blkpwr_down(gxp);
+ if (!ret)
+ gxp->power_mgr->curr_state = AUR_OFF;
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+ return ret;
+}
+
+int gxp_pm_get_blk_state(struct gxp_dev *gxp)
+{
+ int ret;
+
+ if (!gxp->power_mgr) {
+ dev_err(gxp->dev, "%s: No PM found\n", __func__);
+ return -ENODEV;
+ }
+ mutex_lock(&gxp->power_mgr->pm_lock);
+ ret = gxp->power_mgr->curr_state;
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+
+ return ret;
+}
+
+int gxp_pm_core_on(struct gxp_dev *gxp, uint core)
+{
+ int ret = 0;
+
+ /*
+ * Check if TOP LPM is already on.
+ */
+ WARN_ON(!gxp_lpm_is_initialized(gxp, LPM_TOP_PSM));
+
+ mutex_lock(&gxp->power_mgr->pm_lock);
+ ret = gxp_lpm_up(gxp, core);
+ if (ret) {
+ dev_err(gxp->dev, "%s: Core %d on fail\n", __func__, core);
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+ return ret;
+ }
+
+ gxp->power_mgr->pwr_state_req[core] = gxp->power_mgr->curr_state;
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+
+ dev_notice(gxp->dev, "%s: Core %d up\n", __func__, core);
+ return ret;
+}
+
+int gxp_pm_core_off(struct gxp_dev *gxp, uint core)
+{
+ /*
+ * Check if TOP LPM is already on.
+ */
+ WARN_ON(!gxp_lpm_is_initialized(gxp, LPM_TOP_PSM));
+
+ mutex_lock(&gxp->power_mgr->pm_lock);
+ gxp_lpm_down(gxp, core);
+ gxp->power_mgr->pwr_state_req[core] = AUR_OFF;
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+ /*
+ * TODO: b/199467568 If all cores are off shutdown blk
+ */
+ dev_notice(gxp->dev, "%s: Core %d down\n", __func__, core);
+ return 0;
+}
+
+int gxp_pm_get_core_state(struct gxp_dev *gxp, uint core)
+{
+ int ret;
+
+ mutex_lock(&gxp->power_mgr->pm_lock);
+ ret = gxp->power_mgr->pwr_state_req[core];
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+
+ return ret;
+}
+
+int gxp_pm_req_state(struct gxp_dev *gxp, uint core, enum aur_power_state state)
+{
+ int i;
+ unsigned long curr_max_state = AUR_OFF;
+
+ if (core >= GXP_NUM_CORES) {
+ dev_err(gxp->dev, "Invalid core num %d\n", core);
+ return -EINVAL;
+ }
+
+ if (state > AUR_MAX_ALLOW_STATE) {
+ dev_err(gxp->dev, "Invalid state %d\n", state);
+ return -EINVAL;
+ }
+ mutex_lock(&gxp->power_mgr->pm_lock);
+ gxp->power_mgr->pwr_state_req[core] = state;
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ if (gxp->power_mgr->pwr_state_req[i] >= curr_max_state)
+ curr_max_state = gxp->power_mgr->pwr_state_req[i];
+ }
+
+ if (state == AUR_OFF)
+ gxp_pm_core_off(gxp, core);
+ if (curr_max_state != gxp->power_mgr->curr_state &&
+ curr_max_state > AUR_OFF) {
+ gxp_pm_blk_set_state_acpm(gxp, curr_max_state);
+ gxp->power_mgr->curr_state = curr_max_state;
+ } else {
+ /*
+ * TODO: b/199467568 If all cores are off shutdown blk
+ */
+ }
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+
+ return 0;
+}
+
+int gxp_pm_acquire_blk_wakelock(struct gxp_dev *gxp)
+{
+ mutex_lock(&gxp->power_mgr->pm_lock);
+ refcount_inc(&(gxp->power_mgr->blk_wake_ref));
+ dev_dbg(gxp->dev, "Blk wakelock ref count: %d\n",
+ refcount_read(&(gxp->power_mgr->blk_wake_ref)));
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+ return 0;
+}
+
+int gxp_pm_release_blk_wakelock(struct gxp_dev *gxp)
+{
+ mutex_lock(&gxp->power_mgr->pm_lock);
+ if (refcount_read(&(gxp->power_mgr->blk_wake_ref))) {
+ refcount_dec(&(gxp->power_mgr->blk_wake_ref));
+ } else {
+ dev_err(gxp->dev, "Blk wakelock is already zero\n");
+ WARN_ON(1);
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+ return -EIO;
+ }
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+ dev_notice(gxp->dev, "Release blk wakelock\n");
+ return 0;
+}
+
+int gxp_pm_init(struct gxp_dev *gxp)
+{
+ struct gxp_power_manager *mgr;
+ int i;
+
+ mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
+ mgr->gxp = gxp;
+ mutex_init(&mgr->pm_lock);
+ mgr->curr_state = AUR_OFF;
+ refcount_set(&(mgr->blk_wake_ref), 0);
+ for (i = 0; i < GXP_NUM_CORES; i++)
+ mgr->pwr_state_req[i] = AUR_OFF;
+ mgr->ops = &gxp_aur_ops;
+ gxp->power_mgr = mgr;
+ return 0;
+}
+
+int gxp_pm_destroy(struct gxp_dev *gxp)
+{
+ struct gxp_power_manager *mgr;
+
+ mgr = gxp->power_mgr;
+ mutex_destroy(&mgr->pm_lock);
+ return 0;
+}
diff --git a/gxp-pm.h b/gxp-pm.h
new file mode 100644
index 0000000..fdee637
--- /dev/null
+++ b/gxp-pm.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GXP power management.
+ *
+ * Copyright (C) 2021 Google LLC
+ */
+#ifndef __GXP_PM_H__
+#define __GXP_PM_H__
+
+#include "gxp-internal.h"
+#include <linux/refcount.h>
+
+#define AUR_DVFS_MIN_STATE 178000
+
+enum aur_power_state {
+ AUR_OFF = 0,
+ AUR_UUD = 178000,
+ AUR_SUD = 373000,
+ AUR_UD = 750000,
+ AUR_NOM = 1160000,
+};
+
+#define AUR_INIT_DVFS_STATE AUR_NOM
+#define AUR_MAX_ALLOW_STATE AUR_NOM
+
+struct gxp_pm_device_ops {
+ int (*pre_blk_powerup)(struct gxp_dev *gxp);
+ int (*post_blk_powerup)(struct gxp_dev *gxp);
+ int (*pre_blk_poweroff)(struct gxp_dev *gxp);
+ int (*post_blk_poweroff)(struct gxp_dev *gxp);
+};
+
+struct gxp_power_manager {
+ struct gxp_dev *gxp;
+ struct mutex pm_lock;
+ int pwr_state_req[GXP_NUM_CORES];
+ int curr_state;
+ refcount_t blk_wake_ref;
+ struct gxp_pm_device_ops *ops;
+};
+
+/**
+ * gxp_pm_blk_on() - Turn on the power for BLK_AUR
+ * @gxp: The GXP device to turn on
+ *
+ * Return:
+ * * 0 - BLK ON successfully
+ * * -ENODEV - Cannot find PM interface
+ */
+int gxp_pm_blk_on(struct gxp_dev *gxp);
+
+/**
+ * gxp_pm_blk_off() - Turn off the power for BLK_AUR
+ * @gxp: The GXP device to turn off
+ *
+ * Return:
+ * * 0 - BLK OFF successfully
+ * * -ENODEV - Cannot find PM interface
+ * * -EBUSY - Wakelock is held, blk is still busy
+ */
+int gxp_pm_blk_off(struct gxp_dev *gxp);
+
+/**
+ * gxp_pm_get_blk_state() - Get the blk power state
+ * @gxp: The GXP device to sample state
+ *
+ * Return:
+ * * state - State number represented in kHz, or 0 if OFF
+ */
+int gxp_pm_get_blk_state(struct gxp_dev *gxp);
+
+/**
+ * gxp_pm_core_on() - Turn on a core on GXP device
+ * @gxp: The GXP device to operate
+ * @core: The core ID to turn on
+ *
+ * Return:
+ * * 0 - Core on process finished successfully
+ * * -ETIMEDOUT - Core on process timed-out.
+ */
+int gxp_pm_core_on(struct gxp_dev *gxp, uint core);
+
+/**
+ * gxp_pm_core_off() - Turn off a core on GXP device
+ * @gxp: The GXP device to operate
+ * @core: The core ID to turn off
+ *
+ * Return:
+ * * 0 - Core off process finished successfully
+ */
+int gxp_pm_core_off(struct gxp_dev *gxp, uint core);
+
+/**
+ * gxp_pm_get_core_state() - Get the core power state
+ * @gxp: The GXP device to operate
+ * @core: The core ID to get the state of
+ *
+ * Return:
+ * * state - Frequency number in kHz the core requested
+ */
+int gxp_pm_get_core_state(struct gxp_dev *gxp, uint core);
+
+/**
+ * gxp_pm_acquire_blk_wakelock() - Acquire blk wakelock
+ * to make sure the block won't shut down.
+ *
+ * Can be called multiple times and it will increase
+ * reference count.
+ *
+ * @gxp: The GXP device to operate
+ *
+ * Return:
+ * * 0 - Wakelock acquired
+ */
+int gxp_pm_acquire_blk_wakelock(struct gxp_dev *gxp);
+
+/**
+ * gxp_pm_release_blk_wakelock() - Release blk wakelock.
+ *
+ * Can be called multiple times and it will decrease
+ * reference count till 0.
+ *
+ * @gxp: The GXP device to operate
+ *
+ * Return:
+ * * 0 - Wakelock released
+ * * -EIO - No wakelock is currently held
+ */
+int gxp_pm_release_blk_wakelock(struct gxp_dev *gxp);
+
+/**
+ * gxp_pm_req_state() - API for a GXP core to vote for a
+ * desired power state.
+ * @gxp: The GXP device to operate
+ * @core: Voting core ID
+ * @state: State the core voting for
+ *
+ * Return:
+ * * 0 - Voting registered
+ * * -EINVAL - Invalid core num
+ */
+int gxp_pm_req_state(struct gxp_dev *gxp, uint core, enum aur_power_state state);
+
+/**
+ * gxp_pm_init() - API for initializing the PM
+ * interface for GXP; should only be called
+ * once per probe
+ * @gxp: The GXP device to operate
+ *
+ * Return:
+ * * 0 - Initialization finished successfully
+ * * -ENOMEM - Cannot get memory to finish init.
+ */
+int gxp_pm_init(struct gxp_dev *gxp);
+
+/**
+ * gxp_pm_destroy() - API for removing
+ * the power management interface
+ * @gxp: The GXP device to operate
+ *
+ * Return:
+ * * 0 - Remove finished successfully
+ */
+int gxp_pm_destroy(struct gxp_dev *gxp);
+
+/**
+ * gxp_pm_blk_set_state_acpm() - API for setting the block-level DVFS state.
+ * This function can be called at any point after block power on.
+ * @gxp: The GXP device to operate
+ * @state: State number in kHz that needs to be set.
+ * Supported states are listed in enum aur_power_state;
+ * to experiment with an unsupported state,
+ * please refer to Lassen's ECT table.
+ *
+ * Return:
+ * * 0 - Set finished successfully
+ * * Other - Set state encounter issue in exynos_acpm_set_rate
+ */
+int gxp_pm_blk_set_state_acpm(struct gxp_dev *gxp, unsigned long state);
+
+/**
+ * gxp_pm_blk_get_state_acpm() - API for getting
+ * the current DVFS state of the Aurora block.
+ * @gxp: The GXP device to operate
+ *
+ * Return:
+ * * State - State number in kHz from ACPM
+ */
+int gxp_pm_blk_get_state_acpm(struct gxp_dev *gxp);
+#endif /* __GXP_PM_H__ */
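
Putting the interface together, the intended in-kernel call sequence looks roughly like the sketch below (illustrative only; it uses nothing beyond the gxp-pm.h API declared above, and error handling is trimmed):

#include "gxp-pm.h"

/* Bring up BLK_AUR and one DSP core, vote for a DVFS state, then tear down. */
static int gxp_pm_example(struct gxp_dev *gxp, uint core)
{
	int ret;

	ret = gxp_pm_blk_on(gxp);            /* powers the block at AUR_INIT_DVFS_STATE */
	if (ret)
		return ret;

	gxp_pm_acquire_blk_wakelock(gxp);    /* keep the block from being powered off */

	ret = gxp_pm_core_on(gxp, core);     /* brings the core's PSM up via the LPM */
	if (ret)
		goto out;

	/* Vote for a lower frequency; the manager applies the max of all core votes. */
	gxp_pm_req_state(gxp, core, AUR_UUD);

	/* ... run workload ... */

	gxp_pm_core_off(gxp, core);

out:
	gxp_pm_release_blk_wakelock(gxp);
	gxp_pm_blk_off(gxp);                 /* would return -EBUSY if the wakelock were still held */
	return ret;
}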
diff --git a/gxp-telemetry.c b/gxp-telemetry.c
new file mode 100644
index 0000000..4e97ec1
--- /dev/null
+++ b/gxp-telemetry.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GXP telemetry support
+ *
+ * Copyright (C) 2021 Google LLC
+ */
+
+#include <linux/slab.h>
+
+#include "gxp-config.h"
+#include "gxp-dma.h"
+#include "gxp-firmware-data.h"
+#include "gxp-telemetry.h"
+
+int gxp_telemetry_init(struct gxp_dev *gxp)
+{
+ struct gxp_telemetry_manager *mgr;
+
+ mgr = devm_kzalloc(gxp->dev, sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
+
+ mutex_init(&mgr->lock);
+
+ gxp->telemetry_mgr = mgr;
+
+ return 0;
+}
+
+/* Wrapper struct to be used by the telemetry vma_ops. */
+struct telemetry_vma_data {
+ struct gxp_dev *gxp;
+ struct buffer_data *data;
+ u8 type;
+};
+
+static void gxp_telemetry_vma_open(struct vm_area_struct *vma)
+{
+ struct gxp_dev *gxp;
+ struct buffer_data *data;
+
+ gxp = ((struct telemetry_vma_data *)vma->vm_private_data)->gxp;
+ data = ((struct telemetry_vma_data *)vma->vm_private_data)->data;
+
+ mutex_lock(&gxp->telemetry_mgr->lock);
+
+ refcount_inc(&data->ref_count);
+
+ mutex_unlock(&gxp->telemetry_mgr->lock);
+}
+
+static void gxp_telemetry_vma_close(struct vm_area_struct *vma)
+{
+ struct gxp_dev *gxp;
+ struct buffer_data *data;
+ u8 type;
+ int i;
+
+ gxp = ((struct telemetry_vma_data *)vma->vm_private_data)->gxp;
+ data = ((struct telemetry_vma_data *)vma->vm_private_data)->data;
+ type = ((struct telemetry_vma_data *)vma->vm_private_data)->type;
+
+ mutex_lock(&gxp->telemetry_mgr->lock);
+
+ if (refcount_dec_and_test(&data->ref_count)) {
+ if (data->enabled)
+ gxp_telemetry_disable(gxp, type);
+
+ for (i = 0; i < GXP_NUM_CORES; i++)
+ gxp_dma_free_coherent(gxp, BIT(i), data->size,
+ data->buffers[i],
+ data->buffer_daddrs[i]);
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ gxp->telemetry_mgr->logging_buff_data = NULL;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ gxp->telemetry_mgr->tracing_buff_data = NULL;
+ break;
+ default:
+ dev_warn(gxp->dev, "%s called with invalid type %u\n",
+ __func__, type);
+ }
+ kfree(data);
+ kfree(vma->vm_private_data);
+ }
+
+ mutex_unlock(&gxp->telemetry_mgr->lock);
+}
+
+static const struct vm_operations_struct gxp_telemetry_vma_ops = {
+ .open = gxp_telemetry_vma_open,
+ .close = gxp_telemetry_vma_close,
+};
+
+/**
+ * check_telemetry_type_availability() - Checks if @type is valid and whether
+ * buffers of that type already exists.
+ * @gxp: The GXP device to check availability for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Caller must hold the telemetry_manager's lock.
+ *
+ * Return:
+ * * 0 - @type is valid and can have new buffers created
+ * * -EBUSY - Buffers already exist for @type
+ * * -EINVAL - @type is not a valid telemetry type
+ */
+static int check_telemetry_type_availability(struct gxp_dev *gxp, u8 type)
+{
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ if (gxp->telemetry_mgr->logging_buff_data)
+ return -EBUSY;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ if (gxp->telemetry_mgr->tracing_buff_data)
+ return -EBUSY;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * allocate_telemetry_buffers() - Allocate and populate a `struct buffer_data`,
+ * including allocating and mapping one coherent
+ * buffer of @size bytes per core.
+ * @gxp: The GXP device to allocate the buffers for
+ * @size: The size of buffer to allocate for each core
+ *
+ * Caller must hold the telemetry_manager's lock.
+ *
+ * Return: A pointer to the `struct buffer_data` if successful, NULL otherwise
+ */
+static struct buffer_data *allocate_telemetry_buffers(struct gxp_dev *gxp,
+ size_t size)
+{
+ struct buffer_data *data;
+ int i;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ /* Allocate cache-coherent buffers for logging/tracing to */
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ data->buffers[i] =
+ gxp_dma_alloc_coherent(gxp, BIT(i), size,
+ &data->buffer_daddrs[i],
+ GFP_KERNEL, 0);
+ if (!data->buffers[i])
+ goto err_alloc;
+ }
+ data->size = size;
+ refcount_set(&data->ref_count, 1);
+ data->enabled = false;
+
+ return data;
+
+err_alloc:
+ for (; i > 0; i--) {
+ gxp_dma_free_coherent(gxp, BIT(i - 1), size,
+ data->buffers[i - 1],
+ data->buffer_daddrs[i - 1]);
+ }
+ kfree(data);
+
+ return NULL;
+}
+
+/**
+ * remap_telemetry_buffers() - Remaps a set of telemetry buffers into a
+ * user-space vm_area.
+ * @gxp: The GXP device the buffers were allocated for
+ * @vma: A vm area to remap the buffers into
+ * @data: The data describing a set of telemetry buffers to remap
+ *
+ * Caller must hold the telemetry_manager's lock.
+ *
+ * Return:
+ * * 0 - Success
+ * * otherwise - Error returned by `remap_pfn_range()`
+ */
+static int remap_telemetry_buffers(struct gxp_dev *gxp,
+ struct vm_area_struct *vma,
+ struct buffer_data *data)
+{
+ unsigned long orig_pgoff = vma->vm_pgoff;
+ int i;
+ unsigned long offset;
+ phys_addr_t phys;
+ int ret = 0;
+
+ /* mmap the buffers */
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_pgoff = 0;
+
+ for (i = 0; i < GXP_NUM_CORES; i++) {
+ /*
+ * Remap each core's buffer a page at a time, in case it is not
+ * physically contiguous.
+ */
+ for (offset = 0; offset < data->size; offset += PAGE_SIZE) {
+ /*
+ * `virt_to_phys()` does not work on memory allocated
+ * by `dma_alloc_coherent()`, so we have to use
+ * `iommu_iova_to_phys()` instead. Since all buffers
+ * are mapped to the default domain as well as any per-
+ * core domains, we can use it here to get the physical
+ * address of any valid IOVA, regardless of its core.
+ */
+ phys = iommu_iova_to_phys(
+ iommu_get_domain_for_dev(gxp->dev),
+ data->buffer_daddrs[i] + offset);
+ ret = remap_pfn_range(
+ vma, vma->vm_start + data->size * i + offset,
+ phys >> PAGE_SHIFT, PAGE_SIZE,
+ vma->vm_page_prot);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ vma->vm_pgoff = orig_pgoff;
+ vma->vm_ops = &gxp_telemetry_vma_ops;
+
+ return ret;
+}
+
+int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
+ struct vm_area_struct *vma)
+{
+ int ret = 0;
+ struct telemetry_vma_data *vma_data;
+ size_t total_size = vma->vm_end - vma->vm_start;
+ size_t size = total_size / GXP_NUM_CORES;
+ struct buffer_data *data;
+ int i;
+
+ if (!gxp->telemetry_mgr)
+ return -ENODEV;
+
+ /* Total size must divide evenly into 1 page-aligned buffer per core */
+ if (!total_size || !IS_ALIGNED(total_size, PAGE_SIZE * GXP_NUM_CORES))
+ return -EINVAL;
+
+ mutex_lock(&gxp->telemetry_mgr->lock);
+
+ ret = check_telemetry_type_availability(gxp, type);
+ if (ret)
+ goto err;
+
+ vma_data = kmalloc(sizeof(*vma_data), GFP_KERNEL);
+ if (!vma_data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ data = allocate_telemetry_buffers(gxp, size);
+ if (!data) {
+ ret = -ENOMEM;
+ goto err_free_vma_data;
+ }
+
+ ret = remap_telemetry_buffers(gxp, vma, data);
+ if (ret)
+ goto err_free_buffers;
+
+ vma_data->gxp = gxp;
+ vma_data->data = data;
+ vma_data->type = type;
+ vma->vm_private_data = vma_data;
+
+ /* Save book-keeping on the buffers in the telemetry manager */
+ if (type == GXP_TELEMETRY_TYPE_LOGGING)
+ gxp->telemetry_mgr->logging_buff_data = data;
+ else /* type == GXP_TELEMETRY_TYPE_TRACING */
+ gxp->telemetry_mgr->tracing_buff_data = data;
+
+ mutex_unlock(&gxp->telemetry_mgr->lock);
+
+ return 0;
+
+err_free_buffers:
+ for (i = 0; i < GXP_NUM_CORES; i++)
+ gxp_dma_free_coherent(gxp, BIT(i), data->size, data->buffers[i],
+ data->buffer_daddrs[i]);
+ kfree(data);
+
+err_free_vma_data:
+ kfree(vma_data);
+
+err:
+ mutex_unlock(&gxp->telemetry_mgr->lock);
+ return ret;
+}
+
+int gxp_telemetry_enable(struct gxp_dev *gxp, u8 type)
+{
+ struct buffer_data *data;
+ int ret = 0;
+
+ mutex_lock(&gxp->telemetry_mgr->lock);
+
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ data = gxp->telemetry_mgr->logging_buff_data;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ data = gxp->telemetry_mgr->tracing_buff_data;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!data) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ /* Populate the buffer fields in firmware-data */
+ gxp_fw_data_set_telemetry_descriptors(
+ gxp, type, (u32 *)data->buffer_daddrs, data->size);
+
+ /* TODO(b/202937192) To be done in a future CL */
+ /* Notify any running cores that firmware-data was updated */
+
+ data->enabled = true;
+
+out:
+ mutex_unlock(&gxp->telemetry_mgr->lock);
+
+ return ret;
+}
+
+int gxp_telemetry_disable(struct gxp_dev *gxp, u8 type)
+{
+ struct buffer_data *data;
+ int ret = 0;
+ u32 null_daddrs[GXP_NUM_CORES] = {0};
+
+ mutex_lock(&gxp->telemetry_mgr->lock);
+
+ /* Cleanup telemetry manager's book-keeping */
+ switch (type) {
+ case GXP_TELEMETRY_TYPE_LOGGING:
+ data = gxp->telemetry_mgr->logging_buff_data;
+ break;
+ case GXP_TELEMETRY_TYPE_TRACING:
+ data = gxp->telemetry_mgr->tracing_buff_data;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!data) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (!data->enabled)
+ goto out;
+
+ /* Clear the log buffer fields in firmware-data */
+ gxp_fw_data_set_telemetry_descriptors(gxp, type, null_daddrs, 0);
+
+ /* TODO(b/202937192) To be done in a future CL */
+ /* Notify any running cores that firmware-data was updated */
+ /* Wait for ACK from firmware */
+
+ data->enabled = false;
+
+out:
+ mutex_unlock(&gxp->telemetry_mgr->lock);
+
+ return ret;
+}
diff --git a/gxp-telemetry.h b/gxp-telemetry.h
new file mode 100644
index 0000000..f481577
--- /dev/null
+++ b/gxp-telemetry.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GXP telemetry support
+ *
+ * Copyright (C) 2021 Google LLC
+ */
+#ifndef __GXP_TELEMETRY_H__
+#define __GXP_TELEMETRY_H__
+
+#include <linux/refcount.h>
+#include <linux/types.h>
+
+#include "gxp.h"
+#include "gxp-internal.h"
+
+struct gxp_telemetry_manager {
+ struct buffer_data {
+ void *buffers[GXP_NUM_CORES];
+ dma_addr_t buffer_daddrs[GXP_NUM_CORES];
+ u32 size;
+ refcount_t ref_count;
+ bool enabled;
+ } *logging_buff_data, *tracing_buff_data;
+ /* Protects logging_buff_data and tracing_buff_data */
+ struct mutex lock;
+};
+
+/**
+ * gxp_telemetry_init() - Initialize telemetry support
+ * @gxp: The GXP device to initialize telemetry support for
+ *
+ * Return:
+ * * 0 - Success
+ * * -ENOMEM - Insufficient memory is available to initialize support
+ */
+int gxp_telemetry_init(struct gxp_dev *gxp);
+
+/**
+ * gxp_telemetry_mmap_buffers() - Allocate a telemetry buffer for each core and
+ * map them to their core and the user-space vma
+ * @gxp: The GXP device to create the buffers for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ * @vma: The vma from user-space which all cores' buffers will be mapped into
+ *
+ * Return:
+ * * 0 - Success
+ * * -ENODEV - Telemetry support has not been initialized. This must be
+ *             checked explicitly, since this function is called based on
+ *             user input.
+ * * -EBUSY - The requested telemetry @type is already in use
+ * * -EINVAL - Either the vma size is not aligned or @type is not valid
+ * * -ENOMEM - Insufficient memory is available to allocate and map the buffers
+ */
+int gxp_telemetry_mmap_buffers(struct gxp_dev *gxp, u8 type,
+ struct vm_area_struct *vma);
+
+/**
+ * gxp_telemetry_enable() - Enable logging or tracing for all DSP cores
+ * @gxp: The GXP device to enable either logging or tracing for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - The @type provided is not valid
+ * * -ENXIO - Buffers for @type have not been created/mapped yet
+ */
+int gxp_telemetry_enable(struct gxp_dev *gxp, u8 type);
+
+/**
+ * gxp_telemetry_disable() - Disable logging or tracing for all DSP cores
+ * @gxp: The GXP device to disable either logging or tracing for
+ * @type: Either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ *
+ * Return:
+ * * 0 - Success
+ * * -EINVAL - The @type provided is not valid
+ * * -ENXIO - Buffers for @type have not been created/mapped yet
+ */
+int gxp_telemetry_disable(struct gxp_dev *gxp, u8 type);
+
+#endif /* __GXP_TELEMETRY_H__ */
diff --git a/gxp-tmp.h b/gxp-tmp.h
index 1e92420..088dcdf 100644
--- a/gxp-tmp.h
+++ b/gxp-tmp.h
@@ -9,8 +9,17 @@
/* TODO (b/176979630): Delete gxp.tmp.h. Move definitions to gxp-config.h */
-#define AURORA_SCRATCHPAD_OFF 0x00F00000 /* Last 1M of ELF load region */
-#define AURORA_SCRATCHPAD_LEN 0x00100000 /* 1M */
+#if !IS_ENABLED(CONFIG_GXP_TEST)
+
+#define AURORA_SCRATCHPAD_OFF 0x00F00000 /* Last 1M of ELF load region */
+#define AURORA_SCRATCHPAD_LEN 0x00100000 /* 1M */
+
+#else /* CONFIG_GXP_TEST */
+/* Firmware memory is shrunk in unit tests. */
+#define AURORA_SCRATCHPAD_OFF 0x000F0000
+#define AURORA_SCRATCHPAD_LEN 0x00010000
+
+#endif /* CONFIG_GXP_TEST */
#define Q7_ALIVE_MAGIC 0x55555555
diff --git a/gxp-vd.c b/gxp-vd.c
index 2e82792..fcb99ea 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include "gxp-dma.h"
#include "gxp-firmware.h"
#include "gxp-firmware-data.h"
#include "gxp-internal.h"
@@ -198,6 +199,7 @@ struct gxp_client *gxp_client_create(struct gxp_dev *gxp)
client->gxp = gxp;
client->vd_allocated = false;
client->app = NULL;
+ client->tpu_mbx_allocated = false;
return client;
}
@@ -206,7 +208,17 @@ void gxp_client_destroy(struct gxp_client *client)
struct gxp_dev *gxp = client->gxp;
mutex_lock(&gxp->vd_lock);
+
+#ifdef CONFIG_ANDROID
+ /*
+ * Unmap TPU buffers, if the mapping is already removed, this
+ * is a no-op.
+ */
+ gxp_dma_unmap_tpu_buffer(gxp, client->mbx_desc);
+#endif /* CONFIG_ANDROID */
gxp_vd_release(client);
+
mutex_unlock(&gxp->vd_lock);
+
kfree(client);
}
diff --git a/gxp.h b/gxp.h
index ef47f58..1982667 100644
--- a/gxp.h
+++ b/gxp.h
@@ -10,6 +10,15 @@
#include <linux/ioctl.h>
#include <linux/types.h>
+/*
+ * mmap offsets for logging and tracing buffers
+ * Requested size will be divided evenly among all cores. The whole buffer
+ * must be page-aligned, and the size of each core's buffer must be a multiple
+ * of PAGE_SIZE.
+ */
+#define GXP_MMAP_LOG_BUFFER_OFFSET 0x10000
+#define GXP_MMAP_TRACE_BUFFER_OFFSET 0x20000
+
#define GXP_IOCTL_BASE 0xEE
/* GXP map flag macros */
@@ -217,4 +226,168 @@ struct gxp_virtual_device_ioctl {
#define GXP_ALLOCATE_VIRTUAL_DEVICE \
_IOWR(GXP_IOCTL_BASE, 6, struct gxp_virtual_device_ioctl)
+#define ETM_TRACE_LSB_MASK 0x1
+#define ETM_TRACE_SYNC_MSG_PERIOD_MIN 8
+#define ETM_TRACE_SYNC_MSG_PERIOD_MAX 256
+#define ETM_TRACE_PC_MATCH_MASK_LEN_MAX 31
+
+/*
+ * For all *_enable and pc_match_sense fields, only the least significant bit is
+ * considered. All other bits are ignored.
+ */
+struct gxp_etm_trace_start_ioctl {
+ __u16 virtual_core_id;
+ __u8 trace_ram_enable; /* Enables local trace memory. */
+ /* When set, trace output is sent out on the ATB interface. */
+ __u8 atb_enable;
+ /* Enables embedding timestamp information in trace messages. */
+ __u8 timestamp_enable;
+ /*
+ * Determines the rate at which synchronization messages are
+ * automatically emitted in the output trace.
+ * Valid values: 0, 8, 16, 32, 64, 128, 256
+	 * E.g. a value of 16 means one synchronization message will be emitted
+	 * every 16 messages.
+ * A value of 0 means no synchronization messages will be emitted.
+ */
+ __u16 sync_msg_period;
+ __u8 pc_match_enable; /* PC match causes Stop trigger. */
+ /*
+ * 32-bit address to compare to processor PC when pc_match_enable = 1.
+ * A match for a given executed instruction triggers trace stop.
+ * Note: trigger_pc is ignored when pc_match_enable = 0.
+ */
+ __u32 trigger_pc;
+ /*
+ * Indicates how many of the lower bits of trigger_pc to ignore.
+ * Valid values: 0 to 31
+ * Note: pc_match_mask_length is ignored when pc_match_enable = 0.
+ */
+ __u8 pc_match_mask_length;
+	/*
+	 * When 0, match when the processor's PC is in-range of trigger_pc and
+	 * mask. When 1, match when the processor's PC is out-of-range of
+	 * trigger_pc and mask.
+	 * Note: pc_match_sense is ignored when pc_match_enable = 0.
+	 */
+ __u8 pc_match_sense;
+};
+
+/* Configure ETM trace registers and start ETM tracing. */
+#define GXP_ETM_TRACE_START_COMMAND \
+ _IOW(GXP_IOCTL_BASE, 7, struct gxp_etm_trace_start_ioctl)
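
As a sketch of how the fields above fit together, the following hypothetical user-space helper starts tracing on virtual core 0 with trace RAM capture, timestamps, and one synchronization message every 16 messages, leaving the PC-match trigger disabled so that trigger_pc, pc_match_mask_length and pc_match_sense are ignored. The open descriptor fd and the user-space inclusion of gxp.h are assumptions.

/* Hypothetical user-space sketch; fd is an open GXP device descriptor. */
#include <string.h>
#include <sys/ioctl.h>

#include "gxp.h"

int etm_trace_start(int fd)
{
	struct gxp_etm_trace_start_ioctl arg;

	memset(&arg, 0, sizeof(arg));
	arg.virtual_core_id = 0;
	arg.trace_ram_enable = 1;	/* Capture into local trace memory. */
	arg.atb_enable = 0;		/* Do not stream out over ATB. */
	arg.timestamp_enable = 1;
	arg.sync_msg_period = 16;	/* One sync message every 16 messages. */
	arg.pc_match_enable = 0;	/* trigger_pc et al. are ignored. */

	return ioctl(fd, GXP_ETM_TRACE_START_COMMAND, &arg);
}
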
+
+/*
+ * Halts trace generation via a software trigger. The virtual core id is passed
+ * in as an input.
+ */
+#define GXP_ETM_TRACE_SW_STOP_COMMAND \
+ _IOW(GXP_IOCTL_BASE, 8, __u16)
+
+/*
+ * Users should call this IOCTL after tracing has been stopped for the last
+ * trace session of the core. Otherwise, there is a risk of having up to 3 bytes
+ * of trace data missing towards the end of the trace session.
+ * This is a workaround for b/180728272 and b/181623511.
+ * The virtual core id is passed in as an input.
+ */
+#define GXP_ETM_TRACE_CLEANUP_COMMAND \
+ _IOW(GXP_IOCTL_BASE, 9, __u16)
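
A minimal sketch of the stop-then-cleanup ordering described above, for one virtual core; the helper name and the open descriptor fd are assumptions.

/* Hypothetical user-space sketch; fd is an open GXP device descriptor. */
#include <sys/ioctl.h>

#include "gxp.h"

int etm_trace_stop_and_cleanup(int fd, __u16 virtual_core_id)
{
	int ret;

	/* Halt trace generation via the software trigger. */
	ret = ioctl(fd, GXP_ETM_TRACE_SW_STOP_COMMAND, &virtual_core_id);
	if (ret)
		return ret;

	/*
	 * Flush the last few bytes of trace data; required after the final
	 * trace session on this core (see the workaround note above).
	 */
	return ioctl(fd, GXP_ETM_TRACE_CLEANUP_COMMAND, &virtual_core_id);
}
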
+
+#define GXP_TRACE_HEADER_SIZE 256
+#define GXP_TRACE_RAM_SIZE 4096
+struct gxp_etm_get_trace_info_ioctl {
+ /*
+ * Input:
+ * The virtual core to fetch a response from.
+ */
+ __u16 virtual_core_id;
+ /*
+ * Input:
+ * The type of data to retrieve.
+ * 0: Trace Header only
+ * 1: Trace Header + Trace Data in Trace RAM
+ */
+ __u8 type;
+ /*
+ * Input:
+ * Trace header user space address to contain trace header information
+ * that is used for decoding the trace.
+ */
+ __u64 trace_header_addr;
+ /*
+ * Input:
+ * Trace data user space address to contain Trace RAM data.
+ * Note: trace_data field will be empty if type == 0
+ */
+ __u64 trace_data_addr;
+};
+
+/* Retrieves trace header and/or trace data for decoding purposes. */
+#define GXP_ETM_GET_TRACE_INFO_COMMAND \
+ _IOWR(GXP_IOCTL_BASE, 10, struct gxp_etm_get_trace_info_ioctl)
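
The caller supplies both destination buffers; GXP_TRACE_HEADER_SIZE and GXP_TRACE_RAM_SIZE above give their expected sizes. A hedged user-space sketch (helper name and open descriptor fd are assumptions):

/* Hypothetical user-space sketch; fd is an open GXP device descriptor. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include "gxp.h"

int etm_fetch_trace(int fd, __u16 virtual_core_id,
		    uint8_t header[GXP_TRACE_HEADER_SIZE],
		    uint8_t data[GXP_TRACE_RAM_SIZE])
{
	struct gxp_etm_get_trace_info_ioctl arg;

	memset(&arg, 0, sizeof(arg));
	arg.virtual_core_id = virtual_core_id;
	arg.type = 1;	/* Trace header + trace data from trace RAM. */
	arg.trace_header_addr = (__u64)(uintptr_t)header;
	arg.trace_data_addr = (__u64)(uintptr_t)data;

	return ioctl(fd, GXP_ETM_GET_TRACE_INFO_COMMAND, &arg);
}
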
+
+#define GXP_TELEMETRY_TYPE_LOGGING (0)
+#define GXP_TELEMETRY_TYPE_TRACING (1)
+
+/*
+ * Enable either logging or software tracing for all cores.
+ * Accepts either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ * to specify whether logging or software tracing is to be enabled.
+ *
+ * Buffers for logging or tracing must have already been mapped via an `mmap()`
+ * call with the respective offset and initialized by the client, prior to
+ * calling this ioctl.
+ *
+ * If firmware is already running on any cores, they will be signaled to begin
+ * logging/tracing to their buffers. Any cores booting after this call will
+ * begin logging/tracing as soon as their firmware is able to.
+ */
+#define GXP_ENABLE_TELEMETRY _IOWR(GXP_IOCTL_BASE, 11, __u8)
+
+/*
+ * Disable either logging or software tracing for all cores.
+ * Accepts either `GXP_TELEMETRY_TYPE_LOGGING` or `GXP_TELEMETRY_TYPE_TRACING`
+ * to specify whether logging or software tracing is to be disabled.
+ *
+ * This call will block until any running cores have been notified and ACKed
+ * that they have disabled the specified telemetry type.
+ */
+#define GXP_DISABLE_TELEMETRY _IOWR(GXP_IOCTL_BASE, 12, __u8)
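
Both ioctls take a pointer to a single __u8 selecting the telemetry type. A minimal user-space sketch of the enable/disable pair for logging, assuming the logging buffers have already been mapped as described above (helper name and open descriptor fd are assumptions):

/* Hypothetical user-space sketch; fd is an open GXP device descriptor. */
#include <sys/ioctl.h>

#include "gxp.h"

int set_logging_enabled(int fd, int enable)
{
	__u8 type = GXP_TELEMETRY_TYPE_LOGGING;

	return ioctl(fd, enable ? GXP_ENABLE_TELEMETRY : GXP_DISABLE_TELEMETRY,
		     &type);
}
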
+
+struct gxp_tpu_mbx_queue_ioctl {
+ __u32 tpu_fd; /* TPU virtual device group fd */
+ /*
+ * Bitfield indicating which virtual cores to allocate and map the
+ * buffers for.
+ * To map for virtual core X, set bit X in this field, i.e. `1 << X`.
+ *
+ * This field is not used by the unmap IOCTL, which always unmaps the
+ * buffers for all cores it had been mapped for.
+ */
+ __u32 virtual_core_list;
+ /*
+ * The user address of an edgetpu_mailbox_attr struct, containing
+ * cmd/rsp queue size, mailbox priority and other relevant info.
+ * This structure is defined in edgetpu.h in the TPU driver.
+ */
+ __u64 attr_ptr;
+};
+
+/*
+ * Map TPU-DSP mailbox cmd/rsp queue buffers.
+ */
+#define GXP_MAP_TPU_MBX_QUEUE \
+ _IOW(GXP_IOCTL_BASE, 13, struct gxp_tpu_mbx_queue_ioctl)
+
+/*
+ * Unmap TPU-DSP mailbox cmd/rsp queue buffers previously mapped by
+ * GXP_MAP_TPU_MBX_QUEUE.
+ *
+ * Only the @tpu_fd field will be used. Other fields will be fetched
+ * from the kernel's internal records. It is recommended to use the argument
+ * that was passed in GXP_MAP_TPU_MBX_QUEUE to unmap the buffers.
+ */
+#define GXP_UNMAP_TPU_MBX_QUEUE \
+ _IOW(GXP_IOCTL_BASE, 14, struct gxp_tpu_mbx_queue_ioctl)
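
A hedged user-space sketch of pairing the two ioctls above; struct edgetpu_mailbox_attr comes from the TPU driver's edgetpu.h and is passed through opaquely here, and the helper names, descriptor values, and chosen core list are assumptions:

/* Hypothetical user-space sketch; fd is an open GXP device descriptor. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include "gxp.h"
#include "edgetpu.h"	/* For struct edgetpu_mailbox_attr (TPU driver). */

int map_tpu_mbx(int fd, int tpu_fd, const struct edgetpu_mailbox_attr *attr)
{
	struct gxp_tpu_mbx_queue_ioctl arg;

	memset(&arg, 0, sizeof(arg));
	arg.tpu_fd = tpu_fd;
	arg.virtual_core_list = (1 << 0) | (1 << 1);	/* Cores 0 and 1. */
	arg.attr_ptr = (__u64)(uintptr_t)attr;

	return ioctl(fd, GXP_MAP_TPU_MBX_QUEUE, &arg);
}

int unmap_tpu_mbx(int fd, int tpu_fd)
{
	struct gxp_tpu_mbx_queue_ioctl arg;

	/* Only tpu_fd is consulted on unmap; other fields may be zero. */
	memset(&arg, 0, sizeof(arg));
	arg.tpu_fd = tpu_fd;

	return ioctl(fd, GXP_UNMAP_TPU_MBX_QUEUE, &arg);
}
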
+
#endif /* __GXP_H__ */