summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorWhi copybara merger <whitechapel-automerger@google.com>2021-10-27 13:42:05 +0530
committerSermin Aydin <sermin@google.com>2021-11-15 22:31:51 +0000
commitd0864174240fab1307908718da99efb94dafe0f7 (patch)
tree10b2e9bd5f3e02cf2e7c47ce04dfb96180bb27c5
parent43ec0564e7ee9d0ccf24a451b7f696a8507582b8 (diff)
downloadedgetpu-d0864174240fab1307908718da99efb94dafe0f7.tar.gz
[Copybara Auto Merge] Merge branch 'whitechapel' into android-gs-pixel-5.10
This is QOR2 release for Kernel driver Bug:203447297 GitOrigin-RevId: 55fb0c8476374df0c4916ad500dfe4d049b8d86a Change-Id: I498ead5b3b1ff487e4b4a9a74120cec43febc862
-rw-r--r--drivers/edgetpu/Kbuild10
-rw-r--r--drivers/edgetpu/Kconfig8
-rw-r--r--drivers/edgetpu/Makefile5
-rw-r--r--drivers/edgetpu/abrolhos-debug-dump.c158
-rw-r--r--drivers/edgetpu/abrolhos-debug-dump.h16
-rw-r--r--drivers/edgetpu/abrolhos-device.c64
-rw-r--r--drivers/edgetpu/abrolhos-firmware.c227
-rw-r--r--drivers/edgetpu/abrolhos-platform.c333
-rw-r--r--drivers/edgetpu/abrolhos-platform.h56
-rw-r--r--drivers/edgetpu/abrolhos-pm.c755
-rw-r--r--drivers/edgetpu/abrolhos-thermal.c454
-rw-r--r--drivers/edgetpu/abrolhos/config-pwr-state.h (renamed from drivers/edgetpu/abrolhos-pm.h)80
-rw-r--r--drivers/edgetpu/abrolhos/config.h3
-rw-r--r--drivers/edgetpu/edgetpu-config.h16
-rw-r--r--drivers/edgetpu/edgetpu-core.c77
-rw-r--r--drivers/edgetpu/edgetpu-debug-dump.c51
-rw-r--r--drivers/edgetpu/edgetpu-debug-dump.h19
-rw-r--r--drivers/edgetpu/edgetpu-device-group.c92
-rw-r--r--drivers/edgetpu/edgetpu-device-group.h10
-rw-r--r--drivers/edgetpu/edgetpu-dmabuf.c49
-rw-r--r--drivers/edgetpu/edgetpu-external.c164
-rw-r--r--drivers/edgetpu/edgetpu-firmware.c138
-rw-r--r--drivers/edgetpu/edgetpu-firmware.h49
-rw-r--r--drivers/edgetpu/edgetpu-fs.c98
-rw-r--r--drivers/edgetpu/edgetpu-google-iommu.c65
-rw-r--r--drivers/edgetpu/edgetpu-internal.h29
-rw-r--r--drivers/edgetpu/edgetpu-iremap-pool.c21
-rw-r--r--drivers/edgetpu/edgetpu-kci.c63
-rw-r--r--drivers/edgetpu/edgetpu-kci.h8
-rw-r--r--drivers/edgetpu/edgetpu-mailbox.c66
-rw-r--r--drivers/edgetpu/edgetpu-mailbox.h13
-rw-r--r--drivers/edgetpu/edgetpu-mapping.c18
-rw-r--r--drivers/edgetpu/edgetpu-mapping.h35
-rw-r--r--drivers/edgetpu/edgetpu-mmu.h10
-rw-r--r--drivers/edgetpu/edgetpu-mobile-platform.c366
-rw-r--r--drivers/edgetpu/edgetpu-mobile-platform.h124
-rw-r--r--drivers/edgetpu/edgetpu-pm.c54
-rw-r--r--drivers/edgetpu/edgetpu-pm.h2
-rw-r--r--drivers/edgetpu/edgetpu-shared-fw.c360
-rw-r--r--drivers/edgetpu/edgetpu-shared-fw.h81
-rw-r--r--drivers/edgetpu/edgetpu-telemetry.c130
-rw-r--r--drivers/edgetpu/edgetpu-telemetry.h21
-rw-r--r--drivers/edgetpu/edgetpu-usage-stats.c46
-rw-r--r--drivers/edgetpu/edgetpu.h14
-rw-r--r--drivers/edgetpu/include/linux/acpm_dvfs.h40
-rw-r--r--drivers/edgetpu/include/linux/gsa/gsa_tpu.h53
-rw-r--r--drivers/edgetpu/mobile-debug-dump.c132
-rw-r--r--drivers/edgetpu/mobile-debug-dump.h52
-rw-r--r--drivers/edgetpu/mobile-firmware.c514
-rw-r--r--drivers/edgetpu/mobile-firmware.h41
-rw-r--r--drivers/edgetpu/mobile-pm.c755
-rw-r--r--drivers/edgetpu/mobile-pm.h89
-rw-r--r--drivers/edgetpu/mobile-thermal.c455
53 files changed, 3706 insertions, 2883 deletions
diff --git a/drivers/edgetpu/Kbuild b/drivers/edgetpu/Kbuild
index f1e7f56..83dd828 100644
--- a/drivers/edgetpu/Kbuild
+++ b/drivers/edgetpu/Kbuild
@@ -1,7 +1,7 @@
obj-m += abrolhos.o
-ccflags-y += -DCONFIG_EDGETPU_TELEMETRY_TRACE=1 -I$(src)/include
# Use the absolute path of this Makefile to get the source directory.
CURRENT_DIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+ccflags-y += -DCONFIG_EDGETPU_TELEMETRY_TRACE=1 -I$(CURRENT_DIR)/include
GIT_PATH=$(CURRENT_DIR)/../../
ifeq ($(shell git --git-dir=$(GIT_PATH)/.git rev-parse --is-inside-work-tree),true)
GIT_REPO_STATE=$(shell (git --git-dir=$(GIT_PATH)/.git --work-tree=$(GIT_PATH) status --porcelain | grep -q .) && echo -dirty)
@@ -10,9 +10,13 @@ else
ccflags-y += -DGIT_REPO_TAG=\"Not\ a\ git\ repository\"
endif
-edgetpu-fw-objs := edgetpu-firmware.o edgetpu-firmware-util.o edgetpu-shared-fw.o
-edgetpu-objs := edgetpu-mailbox.o edgetpu-kci.o edgetpu-telemetry.o edgetpu-mapping.o edgetpu-dmabuf.o edgetpu-async.o edgetpu-iremap-pool.o edgetpu-sw-watchdog.o $(edgetpu-fw-objs)
+edgetpu-objs := edgetpu-mailbox.o edgetpu-kci.o edgetpu-telemetry.o edgetpu-mapping.o edgetpu-dmabuf.o edgetpu-async.o edgetpu-iremap-pool.o edgetpu-sw-watchdog.o edgetpu-firmware.o edgetpu-firmware-util.o
+
abrolhos-y := abrolhos-device.o abrolhos-device-group.o abrolhos-fs.o abrolhos-core.o abrolhos-platform.o abrolhos-firmware.o abrolhos-thermal.o abrolhos-pm.o abrolhos-iommu.o abrolhos-debug-dump.o abrolhos-usage-stats.o abrolhos-wakelock.o $(edgetpu-objs)
+
+
+
+
CFLAGS_abrolhos-fs.o := -DCONFIG_ABROLHOS=1
CFLAGS_abrolhos-core.o := -DCONFIG_ABROLHOS=1
CFLAGS_abrolhos-device.o := -DCONFIG_ABROLHOS=1
diff --git a/drivers/edgetpu/Kconfig b/drivers/edgetpu/Kconfig
index fe40029..02558be 100644
--- a/drivers/edgetpu/Kconfig
+++ b/drivers/edgetpu/Kconfig
@@ -31,14 +31,6 @@ config EDGETPU_EXTERNAL_WRAPPER_CLASS
external classes that wrap the EdgeTPU core driver and is not
intended for interactive use.
-config EDGETPU_FPGA
- bool "Build for EdgeTPU chip FPGA emulation"
- depends on EDGETPU_FRAMEWORK
- default n
- help
- Say Y to build for HAPS/Palladium/etc. FPGA emulators, or N to build
- silicon with full number of tiles.
-
config EDGETPU_TELEMETRY_TRACE
bool "Build EdgeTPU driver with firmware tracing support"
depends on EDGETPU_FRAMEWORK
diff --git a/drivers/edgetpu/Makefile b/drivers/edgetpu/Makefile
index b30f95f..3c2f832 100644
--- a/drivers/edgetpu/Makefile
+++ b/drivers/edgetpu/Makefile
@@ -13,11 +13,10 @@ else
ccflags-y += -DGIT_REPO_TAG=\"Not\ a\ git\ repository\"
endif
-edgetpu-fw-objs := edgetpu-firmware-util.o edgetpu-firmware.o edgetpu-shared-fw.o
edgetpu-objs := edgetpu-async.o edgetpu-dmabuf.o edgetpu-iremap-pool.o \
edgetpu-kci.o edgetpu-mailbox.o edgetpu-mapping.o \
edgetpu-sw-watchdog.o edgetpu-telemetry.o \
- $(edgetpu-fw-objs)
+ edgetpu-firmware-util.o edgetpu-firmware.o
abrolhos-objs := abrolhos-core.o abrolhos-debug-dump.o \
abrolhos-device-group.o abrolhos-device.o \
@@ -26,6 +25,8 @@ abrolhos-objs := abrolhos-core.o abrolhos-debug-dump.o \
abrolhos-usage-stats.o abrolhos-wakelock.o \
$(edgetpu-objs)
+
+
KBUILD_OPTIONS += CONFIG_ABROLHOS=m
modules modules_install clean:
diff --git a/drivers/edgetpu/abrolhos-debug-dump.c b/drivers/edgetpu/abrolhos-debug-dump.c
index a4dd732..a56808c 100644
--- a/drivers/edgetpu/abrolhos-debug-dump.c
+++ b/drivers/edgetpu/abrolhos-debug-dump.c
@@ -1,12 +1,32 @@
// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implements chip specific details of debug dump memory initialization and SSCD registration.
+ *
+ * Copyright (C) 2021 Google, Inc.
+ */
+
+#if IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP) || IS_ENABLED(CONFIG_EDGETPU_TEST)
-#include <linux/platform_data/sscoredump.h>
#include <linux/platform_device.h>
-#include <linux/slab.h>
#include "abrolhos-platform.h"
-#include "edgetpu-debug-dump.c"
+#include "mobile-debug-dump.c"
+
+static void sscd_release(struct device *dev)
+{
+ pr_debug(DRIVER_NAME " release\n");
+}
+static struct sscd_platform_data sscd_pdata;
+static struct platform_device sscd_dev = {
+ .name = DRIVER_NAME,
+ .driver_override = SSCD_NAME,
+ .id = -1,
+ .dev = {
+ .platform_data = &sscd_pdata,
+ .release = sscd_release,
+ },
+};
static int abrolhos_sscd_generate_coredump(void *p_etdev, void *p_dump_setup)
{
@@ -19,9 +39,14 @@ static int abrolhos_sscd_generate_coredump(void *p_etdev, void *p_dump_setup)
struct edgetpu_debug_dump *debug_dump;
struct edgetpu_crash_reason *crash_reason;
struct edgetpu_dump_segment *dump_seg;
+ struct edgetpu_device_group *group;
+ struct edgetpu_device_group **groups;
+ struct edgetpu_list_group *g;
+ struct mobile_sscd_mappings_dump *mappings_dump = NULL;
char crash_info[128];
- int dump_segments_num;
+ int sscd_dump_segments_num;
int i, ret;
+ size_t num_groups = 0, num_queues = 0;
u64 offset;
if (!p_etdev || !p_dump_setup)
@@ -29,7 +54,7 @@ static int abrolhos_sscd_generate_coredump(void *p_etdev, void *p_dump_setup)
etdev = (struct edgetpu_dev *)p_etdev;
dump_setup = (struct edgetpu_debug_dump_setup *)p_dump_setup;
- pdev = container_of(etdev, struct abrolhos_platform_dev, edgetpu_dev);
+ pdev = to_abrolhos_dev(etdev);
pdata = (struct sscd_platform_data *)pdev->sscd_info.pdata;
sscd_dev = (struct platform_device *)pdev->sscd_info.dev;
if (!pdata->sscd_report) {
@@ -37,48 +62,85 @@ static int abrolhos_sscd_generate_coredump(void *p_etdev, void *p_dump_setup)
return -ENOENT;
}
- offset = sizeof(struct edgetpu_debug_dump_setup);
- debug_dump = (struct edgetpu_debug_dump *)((u64 *)dump_setup +
- word_align_offset(offset));
+ debug_dump = (struct edgetpu_debug_dump *)(dump_setup + 1);
/* Populate crash reason */
- crash_reason = (struct edgetpu_crash_reason *)((u64 *)dump_setup +
- word_align_offset(debug_dump->crash_reason_offset));
+ crash_reason = (struct edgetpu_crash_reason *)((u8 *)dump_setup +
+ debug_dump->crash_reason_offset);
scnprintf(crash_info, sizeof(crash_info),
- "[edgetpu_coredump] error code: 0x%llx", crash_reason->code);
+ "[edgetpu_coredump] error code: %#llx", crash_reason->code);
+
+ mutex_lock(&etdev->groups_lock);
+ groups = kmalloc_array(etdev->n_groups, sizeof(*groups), GFP_KERNEL);
+ if (!groups) {
+ mutex_unlock(&etdev->groups_lock);
+ return -ENOMEM;
+ }
+
+ etdev_for_each_group(etdev, g, group) {
+ if (edgetpu_device_group_is_disbanded(group))
+ continue;
+ groups[num_groups++] = edgetpu_device_group_get(group);
+ }
+ mutex_unlock(&etdev->groups_lock);
- /* Populate dump segments */
- dump_segments_num = debug_dump->dump_segments_num;
- segs = kmalloc_array(dump_segments_num,
+ /* Allocate memory for dump segments */
+ sscd_dump_segments_num = debug_dump->dump_segments_num;
+ sscd_dump_segments_num += 2 * num_groups; /* VII cmd and resp queues */
+ sscd_dump_segments_num += num_groups ? 1 : 0; /* Mappings info */
+ sscd_dump_segments_num += 2; /* KCI cmd and resp queues */
+
+ segs = kmalloc_array(sscd_dump_segments_num,
sizeof(struct sscd_segment),
GFP_KERNEL);
- if (!segs)
- return -ENOMEM;
+ if (!segs) {
+ ret = -ENOMEM;
+ goto out_sscd_generate_coredump;
+ }
- dump_seg = (struct edgetpu_dump_segment *)((u64 *)dump_setup +
- word_align_offset(debug_dump->dump_segments_offset));
- offset = debug_dump->dump_segments_offset +
- sizeof(struct edgetpu_dump_segment);
- for (i = 0; i < dump_segments_num; i++) {
- segs[i].addr = &dump_seg[i].src_addr + 1;
- segs[i].size = dump_seg[i].size;
+ /* Populate sscd segments */
+ dump_seg = (struct edgetpu_dump_segment *)((u8 *)dump_setup +
+ debug_dump->dump_segments_offset);
+ offset = debug_dump->dump_segments_offset;
+ for (i = 0; i < debug_dump->dump_segments_num; i++) {
+ segs[i].addr = dump_seg;
+ segs[i].size = sizeof(struct edgetpu_dump_segment) + dump_seg->size;
segs[i].paddr = (void *)(etdev->debug_dump_mem.tpu_addr +
offset);
segs[i].vaddr = (void *)(etdev->debug_dump_mem.vaddr +
offset);
offset += sizeof(struct edgetpu_dump_segment) + dump_seg->size;
dump_seg = (struct edgetpu_dump_segment *)
- ((u64 *)dump_seg + word_align_offset(
- sizeof(struct edgetpu_dump_segment) +
- dump_seg->size));
+ ((u8 *)dump_setup + ALIGN(offset, sizeof(uint64_t)));
+ }
+
+ if (num_groups) {
+ mappings_dump = mobile_sscd_collect_mappings_segment(groups, num_groups, &segs[i]);
+ if (!mappings_dump) {
+ ret = -ENOMEM;
+ goto out_sscd_generate_coredump;
+ }
+ i++;
}
+ num_queues = mobile_sscd_collect_cmd_resp_queues(etdev, groups, num_groups, &segs[i]);
+
+ /* Adjust num of segments as some groups may have a detached mailbox */
+ sscd_dump_segments_num -= (2 * num_groups + 2); /* Subtract number of VII and KCI queues
+ * according to num_groups.
+ */
+ sscd_dump_segments_num += num_queues; /* Add actual number of valid VII and KCI queues */
+
/* Pass dump data to SSCD daemon */
- etdev_dbg(etdev, "report: %d segments", dump_segments_num);
- ret = pdata->sscd_report(sscd_dev, segs, dump_segments_num,
+ etdev_dbg(etdev, "report: %d segments", sscd_dump_segments_num);
+ ret = pdata->sscd_report(sscd_dev, segs, sscd_dump_segments_num,
SSCD_FLAGS_ELFARM64HDR, crash_info);
-
+out_sscd_generate_coredump:
+ for (i = 0; i < num_groups; i++)
+ edgetpu_device_group_put(groups[i]);
+ kfree(mappings_dump);
kfree(segs);
+ kfree(groups);
return ret;
}
@@ -88,9 +150,18 @@ int edgetpu_debug_dump_init(struct edgetpu_dev *etdev)
size_t size;
int ret;
struct edgetpu_debug_dump_setup *dump_setup;
+ struct abrolhos_platform_dev *pdev;
+
+ pdev = to_abrolhos_dev(etdev);
size = EDGETPU_DEBUG_DUMP_MEM_SIZE;
+ /* Register SSCD platform device */
+ ret = platform_device_register(&sscd_dev);
+ if (ret) {
+ etdev_err(etdev, "SSCD platform device registration failed: %d", ret);
+ return ret;
+ }
/*
* Allocate a buffer for various dump segments
*/
@@ -99,24 +170,29 @@ int edgetpu_debug_dump_init(struct edgetpu_dev *etdev)
if (ret) {
etdev_err(etdev, "Debug dump seg alloc failed");
etdev->debug_dump_mem.vaddr = NULL;
- return ret;
+ goto out_unregister_platform;
}
dump_setup =
(struct edgetpu_debug_dump_setup *)etdev->debug_dump_mem.vaddr;
+ memset(dump_setup, 0, size);
dump_setup->dump_mem_size = size;
- memset(dump_setup, 0, dump_setup->dump_mem_size);
/*
* Allocate memory for debug dump handlers
*/
- etdev->debug_dump_handlers = kcalloc(DUMP_REQ_REASON_NUM,
+ etdev->debug_dump_handlers = kcalloc(DUMP_REASON_NUM,
sizeof(*etdev->debug_dump_handlers),
GFP_KERNEL);
if (!etdev->debug_dump_handlers)
return -ENOMEM;
- etdev->debug_dump_handlers[DUMP_REQ_REASON_BY_USER] =
+ etdev->debug_dump_handlers[DUMP_REASON_REQ_BY_USER] =
abrolhos_sscd_generate_coredump;
+ pdev->sscd_info.pdata = &sscd_pdata;
+ pdev->sscd_info.dev = &sscd_dev;
+ return ret;
+out_unregister_platform:
+ platform_device_unregister(&sscd_dev);
return ret;
}
@@ -132,4 +208,20 @@ void edgetpu_debug_dump_exit(struct edgetpu_dev *etdev)
edgetpu_free_coherent(etdev, &etdev->debug_dump_mem,
EDGETPU_CONTEXT_KCI);
kfree(etdev->debug_dump_handlers);
+ platform_device_unregister(&sscd_dev);
+}
+
+#else /* IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP) || IS_ENABLED(CONFIG_EDGETPU_TEST) */
+
+#include "edgetpu-debug-dump.c"
+
+int edgetpu_debug_dump_init(struct edgetpu_dev *etdev)
+{
+ return 0;
}
+
+void edgetpu_debug_dump_exit(struct edgetpu_dev *etdev)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_SUBSYSTEM_COREDUMP) || IS_ENABLED(CONFIG_EDGETPU_TEST) */
diff --git a/drivers/edgetpu/abrolhos-debug-dump.h b/drivers/edgetpu/abrolhos-debug-dump.h
deleted file mode 100644
index 62ef111..0000000
--- a/drivers/edgetpu/abrolhos-debug-dump.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Module that defines structure to retrieve debug dump segments
- * from abrolhos firmware.
- *
- * Copyright (C) 2020 Google, Inc.
- */
-#ifndef __ABROLHOS_DEBUG_DUMP_H__
-#define __ABROLHOS_DEBUG_DUMP_H__
-
-struct abrolhos_sscd_info {
- void *pdata; /* SSCD platform data */
- void *dev; /* SSCD platform device */
-};
-
-#endif /* ABROLHOS_DEBUG_DUMP_H_ */
diff --git a/drivers/edgetpu/abrolhos-device.c b/drivers/edgetpu/abrolhos-device.c
index 8af3613..c2f118f 100644
--- a/drivers/edgetpu/abrolhos-device.c
+++ b/drivers/edgetpu/abrolhos-device.c
@@ -8,13 +8,13 @@
#include <linux/irqreturn.h>
#include "abrolhos-platform.h"
-#include "abrolhos-pm.h"
#include "edgetpu-config.h"
#include "edgetpu-debug-dump.h"
#include "edgetpu-internal.h"
#include "edgetpu-mailbox.h"
#include "edgetpu-telemetry.h"
#include "edgetpu-wakelock.h"
+#include "mobile-pm.h"
#define HOST_NONSECURE_INTRSRCMASKREG 0x000f0004
@@ -86,18 +86,18 @@ u64 edgetpu_chip_tpu_timestamp(struct edgetpu_dev *etdev)
void edgetpu_chip_init(struct edgetpu_dev *etdev)
{
int i;
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
+ struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
/* Disable the CustomBlock Interrupt. */
edgetpu_dev_write_32(etdev, HOST_NONSECURE_INTRSRCMASKREG, 0x1);
- if (!abpdev->ssmt_base)
+ if (!etmdev->ssmt_base)
return;
/* Setup non-secure SCIDs, assume VID = SCID */
for (i = 0; i < EDGETPU_NCONTEXTS; i++) {
- writel(i, SSMT_NS_READ_STREAM_VID_REG(abpdev->ssmt_base, i));
- writel(i, SSMT_NS_WRITE_STREAM_VID_REG(abpdev->ssmt_base, i));
+ writel(i, SSMT_NS_READ_STREAM_VID_REG(etmdev->ssmt_base, i));
+ writel(i, SSMT_NS_WRITE_STREAM_VID_REG(etmdev->ssmt_base, i));
}
}
@@ -109,25 +109,15 @@ void edgetpu_mark_probe_fail(struct edgetpu_dev *etdev)
{
}
-static void edgetpu_chip_set_pm_qos(struct edgetpu_dev *etdev, u32 value)
-{
- abrolhos_pm_set_pm_qos(etdev, value);
-}
-
-static void edgetpu_chip_set_bts(struct edgetpu_dev *etdev, u32 value)
-{
- abrolhos_pm_set_bts(etdev, value);
-}
-
void edgetpu_chip_handle_reverse_kci(struct edgetpu_dev *etdev,
struct edgetpu_kci_response_element *resp)
{
switch (resp->code) {
case RKCI_CODE_PM_QOS:
- edgetpu_chip_set_pm_qos(etdev, resp->retval);
+ mobile_pm_set_pm_qos(etdev, resp->retval);
break;
case RKCI_CODE_BTS:
- edgetpu_chip_set_bts(etdev, resp->retval);
+ mobile_pm_set_bts(etdev, resp->retval);
break;
default:
etdev_warn(etdev, "%s: Unrecognized KCI request: %u\n",
@@ -156,7 +146,7 @@ static int abrolhos_check_ext_mailbox_args(const char *func,
int edgetpu_chip_acquire_ext_mailbox(struct edgetpu_client *client,
struct edgetpu_ext_mailbox_ioctl *args)
{
- struct abrolhos_platform_dev *apdev = to_abrolhos_dev(client->etdev);
+ struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(client->etdev);
int ret;
ret = abrolhos_check_ext_mailbox_args(__func__, client->etdev,
@@ -164,25 +154,25 @@ int edgetpu_chip_acquire_ext_mailbox(struct edgetpu_client *client,
if (ret)
return ret;
- mutex_lock(&apdev->tz_mailbox_lock);
- if (apdev->secure_client) {
+ mutex_lock(&etmdev->tz_mailbox_lock);
+ if (etmdev->secure_client) {
etdev_err(client->etdev,
"TZ mailbox already in use by PID %d\n",
- apdev->secure_client->pid);
- mutex_unlock(&apdev->tz_mailbox_lock);
+ etmdev->secure_client->pid);
+ mutex_unlock(&etmdev->tz_mailbox_lock);
return -EBUSY;
}
ret = edgetpu_mailbox_enable_ext(client, ABROLHOS_TZ_MAILBOX_ID, NULL);
if (!ret)
- apdev->secure_client = client;
- mutex_unlock(&apdev->tz_mailbox_lock);
+ etmdev->secure_client = client;
+ mutex_unlock(&etmdev->tz_mailbox_lock);
return ret;
}
int edgetpu_chip_release_ext_mailbox(struct edgetpu_client *client,
struct edgetpu_ext_mailbox_ioctl *args)
{
- struct abrolhos_platform_dev *apdev = to_abrolhos_dev(client->etdev);
+ struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(client->etdev);
int ret = 0;
ret = abrolhos_check_ext_mailbox_args(__func__, client->etdev,
@@ -190,32 +180,32 @@ int edgetpu_chip_release_ext_mailbox(struct edgetpu_client *client,
if (ret)
return ret;
- mutex_lock(&apdev->tz_mailbox_lock);
- if (!apdev->secure_client) {
+ mutex_lock(&etmdev->tz_mailbox_lock);
+ if (!etmdev->secure_client) {
etdev_warn(client->etdev, "TZ mailbox already released\n");
- mutex_unlock(&apdev->tz_mailbox_lock);
+ mutex_unlock(&etmdev->tz_mailbox_lock);
return 0;
}
- if (apdev->secure_client != client) {
+ if (etmdev->secure_client != client) {
etdev_err(client->etdev,
"TZ mailbox owned by different client\n");
- mutex_unlock(&apdev->tz_mailbox_lock);
+ mutex_unlock(&etmdev->tz_mailbox_lock);
return -EBUSY;
}
- apdev->secure_client = NULL;
+ etmdev->secure_client = NULL;
ret = edgetpu_mailbox_disable_ext(client, ABROLHOS_TZ_MAILBOX_ID);
- mutex_unlock(&apdev->tz_mailbox_lock);
+ mutex_unlock(&etmdev->tz_mailbox_lock);
return ret;
}
void edgetpu_chip_client_remove(struct edgetpu_client *client)
{
- struct abrolhos_platform_dev *apdev = to_abrolhos_dev(client->etdev);
+ struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(client->etdev);
- mutex_lock(&apdev->tz_mailbox_lock);
- if (apdev->secure_client == client) {
- apdev->secure_client = NULL;
+ mutex_lock(&etmdev->tz_mailbox_lock);
+ if (etmdev->secure_client == client) {
+ etmdev->secure_client = NULL;
edgetpu_mailbox_disable_ext(client, ABROLHOS_TZ_MAILBOX_ID);
}
- mutex_unlock(&apdev->tz_mailbox_lock);
+ mutex_unlock(&etmdev->tz_mailbox_lock);
}
diff --git a/drivers/edgetpu/abrolhos-firmware.c b/drivers/edgetpu/abrolhos-firmware.c
index 063d010..bb1badb 100644
--- a/drivers/edgetpu/abrolhos-firmware.c
+++ b/drivers/edgetpu/abrolhos-firmware.c
@@ -5,229 +5,4 @@
* Copyright (C) 2020 Google, Inc.
*/
-#include <linux/dma-mapping.h>
-#include <linux/gsa/gsa_tpu.h>
-#include <linux/slab.h>
-
-#include "abrolhos-platform.h"
-#include "edgetpu-config.h"
-#include "edgetpu-firmware.h"
-#include "edgetpu-internal.h"
-#include "edgetpu-kci.h"
-#include "edgetpu-mailbox.h"
-#include "mobile-firmware.h"
-
-static int abrolhos_firmware_alloc_buffer(
- struct edgetpu_firmware *et_fw,
- struct edgetpu_firmware_buffer *fw_buf)
-{
- struct edgetpu_dev *etdev = et_fw->etdev;
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
-
- /* Allocate extra space the image header */
- size_t buffer_size =
- abpdev->fw_region_size + MOBILE_FW_HEADER_SIZE;
-
- fw_buf->vaddr = vmalloc(buffer_size);
- if (!fw_buf->vaddr) {
- etdev_err(etdev, "%s: failed to allocate buffer (%zu bytes)\n",
- __func__, buffer_size);
- return -ENOMEM;
- }
- fw_buf->dma_addr = 0;
- fw_buf->alloc_size = buffer_size;
- fw_buf->used_size_align = 16;
- return 0;
-}
-
-static void abrolhos_firmware_free_buffer(
- struct edgetpu_firmware *et_fw,
- struct edgetpu_firmware_buffer *fw_buf)
-{
- vfree(fw_buf->vaddr);
- fw_buf->vaddr = NULL;
- fw_buf->dma_addr = 0;
- fw_buf->alloc_size = 0;
- fw_buf->used_size_align = 0;
-}
-
-static int abrolhos_firmware_setup_buffer(
- struct edgetpu_firmware *et_fw,
- struct edgetpu_firmware_buffer *fw_buf)
-{
- return 0;
-}
-
-static void abrolhos_firmware_teardown_buffer(
- struct edgetpu_firmware *et_fw,
- struct edgetpu_firmware_buffer *fw_buf)
-{
-}
-
-static int abrolhos_firmware_restart(struct edgetpu_firmware *et_fw,
- bool force_reset)
-{
- struct edgetpu_dev *etdev = et_fw->etdev;
- struct abrolhos_platform_dev *edgetpu_pdev = to_abrolhos_dev(etdev);
- int tpu_state;
-
- /* We are in a bad state, send shutdown command and hope the device recovers */
- if (force_reset)
- gsa_send_tpu_cmd(edgetpu_pdev->gsa_dev, GSA_TPU_SHUTDOWN);
-
- tpu_state = gsa_send_tpu_cmd(edgetpu_pdev->gsa_dev, GSA_TPU_START);
-
- if (tpu_state < 0) {
- etdev_err(etdev, "GSA restart firmware failed: %d\n",
- tpu_state);
- return -EIO;
- }
-
- etdev_dbg(etdev, "Firmware restart successful\n");
-
- return 0;
-}
-
-static int abrolhos_firmware_prepare_run(struct edgetpu_firmware *et_fw,
- struct edgetpu_firmware_buffer *fw_buf)
-{
- struct edgetpu_dev *etdev = et_fw->etdev;
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
- void *image_vaddr, *header_vaddr;
- struct mobile_image_config *image_config;
- phys_addr_t image_start, image_end, carveout_start, carveout_end;
- dma_addr_t header_dma_addr;
- int ret, tpu_state;
-
- if (fw_buf->used_size < MOBILE_FW_HEADER_SIZE) {
- etdev_err(etdev, "Invalid buffer size: %zu < %d\n",
- fw_buf->used_size, MOBILE_FW_HEADER_SIZE);
- return -EINVAL;
- }
-
- tpu_state = gsa_send_tpu_cmd(abpdev->gsa_dev, GSA_TPU_GET_STATE);
-
- if (tpu_state < GSA_TPU_STATE_INACTIVE) {
- etdev_err(etdev, "GSA failed to retrieve current status: %d\n", tpu_state);
- return tpu_state;
- }
-
- etdev_dbg(etdev, "GSA Reports TPU state: %d\n", tpu_state);
-
- if (tpu_state > GSA_TPU_STATE_INACTIVE) {
- ret = gsa_unload_tpu_fw_image(abpdev->gsa_dev);
- if (ret) {
- etdev_warn(etdev, "GSA release failed: %d\n", ret);
- return -EIO;
- }
- }
-
- image_vaddr = memremap(abpdev->fw_region_paddr, abpdev->fw_region_size,
- MEMREMAP_WC);
-
- if (!image_vaddr) {
- etdev_err(etdev, "memremap failed\n");
- return -ENOMEM;
- }
-
- /* Skip the header */
- memcpy(image_vaddr, fw_buf->vaddr + MOBILE_FW_HEADER_SIZE,
- fw_buf->used_size - MOBILE_FW_HEADER_SIZE);
-
- /* Allocate coherent memory for the image header */
- header_vaddr = dma_alloc_coherent(abpdev->gsa_dev,
- MOBILE_FW_HEADER_SIZE,
- &header_dma_addr, GFP_KERNEL);
- if (!header_vaddr) {
- etdev_err(etdev,
- "Failed to allocate coherent memory for header\n");
- ret = -ENOMEM;
- goto out_unmap;
- }
-
- memcpy(header_vaddr, fw_buf->vaddr, MOBILE_FW_HEADER_SIZE);
- etdev_dbg(etdev,
- "Requesting GSA image load. meta = %llX payload = %llX",
- header_dma_addr, (u64)abpdev->fw_region_paddr);
-
- ret = gsa_load_tpu_fw_image(abpdev->gsa_dev, header_dma_addr,
- abpdev->fw_region_paddr);
- if (ret) {
- etdev_err(etdev, "GSA authentication failed: %d\n", ret);
- ret = -EIO;
- goto out_free_gsa;
- }
-
- /* fetch the firmware versions */
- image_config = fw_buf->vaddr + MOBILE_IMAGE_CONFIG_OFFSET;
- memcpy(&etdev->fw_version, &image_config->firmware_versions,
- sizeof(etdev->fw_version));
-
- /*
- * GSA verifies the image config addresses and sizes are valid,
- * so we don't perform overflow checks here.
- */
- image_start = (phys_addr_t)image_config->carveout_base;
- image_end = (phys_addr_t)(image_config->firmware_base +
- image_config->firmware_size - 1);
- carveout_start = abpdev->fw_region_paddr;
- carveout_end = carveout_start + abpdev->fw_region_size - 1;
-
- /* Image must fit within the carveout */
- if (image_start < carveout_start || image_end > carveout_end) {
- etdev_err(etdev, "Firmware image doesn't fit in carveout\n");
- etdev_err(etdev, "Image config: %pap - %pap\n", &image_start,
- &image_end);
- etdev_err(etdev, "Carveout: %pap - %pap\n", &carveout_start,
- &carveout_end);
- ret = -ERANGE;
- goto out_free_gsa;
- }
-
- /* Reset KCI mailbox before starting f/w, don't process anything old.*/
- edgetpu_mailbox_reset(etdev->kci->mailbox);
-
- tpu_state = gsa_send_tpu_cmd(abpdev->gsa_dev, GSA_TPU_START);
-
- if (tpu_state < 0) {
- etdev_err(etdev, "GSA start firmware failed: %d\n", tpu_state);
- ret = -EIO;
- }
-
-out_free_gsa:
- dma_free_coherent(abpdev->gsa_dev, MOBILE_FW_HEADER_SIZE,
- header_vaddr, header_dma_addr);
-out_unmap:
- memunmap(image_vaddr);
- return ret;
-}
-
-static const struct edgetpu_firmware_chip_data abrolhos_firmware_chip_data = {
- .default_firmware_name = EDGETPU_DEFAULT_FIRMWARE_NAME,
- .alloc_buffer = abrolhos_firmware_alloc_buffer,
- .free_buffer = abrolhos_firmware_free_buffer,
- .setup_buffer = abrolhos_firmware_setup_buffer,
- .teardown_buffer = abrolhos_firmware_teardown_buffer,
- .prepare_run = abrolhos_firmware_prepare_run,
- .restart = abrolhos_firmware_restart,
-};
-
-int mobile_edgetpu_firmware_create(struct edgetpu_dev *etdev)
-{
- return edgetpu_firmware_create(etdev, &abrolhos_firmware_chip_data);
-}
-
-void mobile_edgetpu_firmware_destroy(struct edgetpu_dev *etdev)
-{
- edgetpu_firmware_destroy(etdev);
-}
-
-unsigned long edgetpu_chip_firmware_iova(struct edgetpu_dev *etdev)
-{
- /*
- * There is no IOVA in Abrolhos, since firmware the IOMMU is
- * bypassed and the only translation in effect is the one
- * done by instruction remap registers
- */
- return EDGETPU_INSTRUCTION_REMAP_BASE;
-}
+#include "mobile-firmware.c"
diff --git a/drivers/edgetpu/abrolhos-platform.c b/drivers/edgetpu/abrolhos-platform.c
index 2a3c50d..b7f391e 100644
--- a/drivers/edgetpu/abrolhos-platform.c
+++ b/drivers/edgetpu/abrolhos-platform.c
@@ -6,351 +6,45 @@
*/
#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/gsa/gsa_tpu.h>
#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/iommu.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/platform_data/sscoredump.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/slab.h>
-#include <linux/types.h>
#include "abrolhos-platform.h"
-#include "abrolhos-pm.h"
#include "edgetpu-config.h"
-#include "edgetpu-firmware.h"
#include "edgetpu-internal.h"
-#include "edgetpu-iremap-pool.h"
-#include "edgetpu-mmu.h"
+#include "edgetpu-mobile-platform.h"
#include "edgetpu-pm.h"
-#include "edgetpu-telemetry.h"
-#include "mobile-firmware.h"
+
+#include "edgetpu-mobile-platform.c"
static const struct of_device_id edgetpu_of_match[] = {
- /* TODO(b/190677977): remove */
- { .compatible = "google,darwinn", },
{ .compatible = "google,edgetpu-gs101", },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, edgetpu_of_match);
-static void sscd_release(struct device *dev)
-{
- pr_debug(DRIVER_NAME " release\n");
-}
-static struct sscd_platform_data sscd_pdata;
-static struct platform_device sscd_dev = {
- .name = DRIVER_NAME,
- .driver_override = SSCD_NAME,
- .id = -1,
- .dev = {
- .platform_data = &sscd_pdata,
- .release = sscd_release,
- },
-};
-
-/*
- * Log and trace buffers at the beginning of the remapped region,
- * pool memory afterwards.
- */
-
-#define EDGETPU_POOL_MEM_OFFSET (EDGETPU_TELEMETRY_BUFFER_SIZE * 2)
-
-static void abrolhos_get_telemetry_mem(struct abrolhos_platform_dev *etpdev,
- enum edgetpu_telemetry_type type,
- struct edgetpu_coherent_mem *mem)
-{
- int offset = type == EDGETPU_TELEMETRY_TRACE ?
- EDGETPU_TELEMETRY_BUFFER_SIZE :
- 0;
- mem->vaddr = etpdev->shared_mem_vaddr + offset;
- mem->dma_addr = EDGETPU_REMAPPED_DATA_ADDR + offset;
- mem->tpu_addr = EDGETPU_REMAPPED_DATA_ADDR + offset;
- mem->host_addr = 0;
- mem->size = EDGETPU_TELEMETRY_BUFFER_SIZE;
-}
-
-/* Setup the firmware region carveout. */
-static int
-edgetpu_platform_setup_fw_region(struct abrolhos_platform_dev *etpdev)
-{
- struct edgetpu_dev *etdev = &etpdev->edgetpu_dev;
- struct platform_device *gsa_pdev;
- struct device *dev = etdev->dev;
- struct resource r;
- struct device_node *np;
- int err;
- size_t region_map_size =
- EDGETPU_FW_SIZE_MAX + EDGETPU_REMAPPED_DATA_SIZE;
-
- np = of_parse_phandle(dev->of_node, "memory-region", 0);
- if (!np) {
- dev_err(dev, "No memory region for firmware\n");
- return -ENODEV;
- }
-
- err = of_address_to_resource(np, 0, &r);
- of_node_put(np);
- if (err) {
- dev_err(dev, "No memory address assigned to firmware region\n");
- return err;
- }
-
- if (resource_size(&r) < region_map_size) {
- dev_err(dev,
- "Memory region for firmware too small (%zu bytes needed, got %llu)\n",
- region_map_size, resource_size(&r));
- return -ENOSPC;
- }
-
- /* Get GSA device from device tree */
- np = of_parse_phandle(dev->of_node, "gsa-device", 0);
- if (!np) {
- dev_err(dev, "No gsa-dev in device tree\n");
- return -ENODEV;
- }
- gsa_pdev = of_find_device_by_node(np);
- if (!gsa_pdev) {
- dev_err(dev, "GSA device not found\n");
- of_node_put(np);
- return -ENODEV;
- }
- etpdev->gsa_dev = &gsa_pdev->dev;
- of_node_put(np);
-
- etpdev->fw_region_paddr = r.start;
- etpdev->fw_region_size = EDGETPU_FW_SIZE_MAX;
-
- etpdev->shared_mem_vaddr =
- memremap(r.start + EDGETPU_REMAPPED_DATA_OFFSET,
- EDGETPU_REMAPPED_DATA_SIZE, MEMREMAP_WC);
- if (!etpdev->shared_mem_vaddr) {
- dev_err(dev, "Shared memory remap failed\n");
- return -EINVAL;
- }
- etpdev->shared_mem_paddr = r.start + EDGETPU_REMAPPED_DATA_OFFSET;
-
- return 0;
-}
-
-static void edgetpu_platform_cleanup_fw_region(
- struct abrolhos_platform_dev *etpdev)
-{
- gsa_unload_tpu_fw_image(etpdev->gsa_dev);
-
- if (!etpdev->shared_mem_vaddr)
- return;
- memunmap(etpdev->shared_mem_vaddr);
- etpdev->shared_mem_vaddr = NULL;
-}
-
-int edgetpu_chip_setup_mmu(struct edgetpu_dev *etdev)
-{
- int ret;
-
- /* No MMU info to pass to attach, IOMMU API will handle. */
- ret = edgetpu_mmu_attach(etdev, NULL);
- if (ret)
- dev_err(etdev->dev, "failed to attach IOMMU: %d\n", ret);
- return ret;
-}
-
-void edgetpu_chip_remove_mmu(struct edgetpu_dev *etdev)
-{
- edgetpu_mmu_detach(etdev);
-}
-
-static int abrolhos_parse_ssmt(struct abrolhos_platform_dev *etpdev)
-{
- struct edgetpu_dev *etdev = &etpdev->edgetpu_dev;
- struct platform_device *pdev = to_platform_device(etdev->dev);
- struct resource *res;
- int rc;
- void __iomem *ssmt_base;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ssmt");
- if (!res) {
- etdev_warn(etdev, "Failed to find SSMT register base");
- return -EINVAL;
- }
- ssmt_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(ssmt_base)) {
- rc = PTR_ERR(ssmt_base);
- etdev_warn(etdev, "Failed to map SSMT register base: %d\n", rc);
- return rc;
- }
- etpdev->ssmt_base = ssmt_base;
- return 0;
-}
static int edgetpu_platform_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
struct abrolhos_platform_dev *abpdev;
- struct resource *r;
- struct edgetpu_mapped_resource regs;
- int ret;
- struct edgetpu_iface_params iface_params[] = {
- /* Default interface */
- { .name = NULL },
- /* Common name for embedded SoC devices */
- { .name = "edgetpu-soc" },
- };
+ struct edgetpu_mobile_platform_dev *etmdev;
- abpdev = devm_kzalloc(dev, sizeof(*abpdev), GFP_KERNEL);
+ abpdev = devm_kzalloc(&pdev->dev, sizeof(*abpdev), GFP_KERNEL);
if (!abpdev)
return -ENOMEM;
- mutex_init(&abpdev->tz_mailbox_lock);
-
- platform_set_drvdata(pdev, &abpdev->edgetpu_dev);
- abpdev->edgetpu_dev.dev = dev;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (IS_ERR_OR_NULL(r)) {
- dev_err(dev, "failed to get memory resource\n");
- return -ENODEV;
- }
- regs.phys = r->start;
- regs.size = resource_size(r);
-
- regs.mem = devm_ioremap_resource(dev, r);
- if (IS_ERR_OR_NULL(regs.mem)) {
- dev_err(dev, "failed to map registers\n");
- return -ENODEV;
- }
-
- mutex_init(&abpdev->platform_pwr.policy_lock);
- abpdev->platform_pwr.curr_policy = TPU_POLICY_MAX;
-
- ret = abrolhos_pm_create(&abpdev->edgetpu_dev);
-
- if (ret) {
- dev_err(dev, "Failed to initialize PM interface (%d)\n", ret);
- return ret;
- }
-
- ret = edgetpu_platform_setup_fw_region(abpdev);
- if (ret) {
- dev_err(dev, "%s setup fw regions failed: %d\n", DRIVER_NAME,
- ret);
- goto out_shutdown;
- }
-
- ret = edgetpu_iremap_pool_create(
- &abpdev->edgetpu_dev,
- /* Base virtual address (kernel address space) */
- abpdev->shared_mem_vaddr + EDGETPU_POOL_MEM_OFFSET,
- /* Base DMA address */
- EDGETPU_REMAPPED_DATA_ADDR + EDGETPU_POOL_MEM_OFFSET,
- /* Base TPU address */
- EDGETPU_REMAPPED_DATA_ADDR + EDGETPU_POOL_MEM_OFFSET,
- /* Base physical address */
- abpdev->shared_mem_paddr + EDGETPU_POOL_MEM_OFFSET,
- /* Size */
- EDGETPU_REMAPPED_DATA_SIZE - EDGETPU_POOL_MEM_OFFSET,
- /* Granularity */
- PAGE_SIZE);
- if (ret) {
- dev_err(dev,
- "%s failed to initialize remapped memory pool: %d\n",
- DRIVER_NAME, ret);
- goto out_cleanup_fw;
- }
-
- abpdev->edgetpu_dev.mcp_id = -1;
- abpdev->edgetpu_dev.mcp_die_index = 0;
- abpdev->irq = platform_get_irq(pdev, 0);
- ret = edgetpu_device_add(&abpdev->edgetpu_dev, &regs, iface_params,
- ARRAY_SIZE(iface_params));
-
- if (!ret && abpdev->irq >= 0)
- ret = edgetpu_register_irq(&abpdev->edgetpu_dev, abpdev->irq);
-
- if (ret) {
- dev_err(dev, "%s edgetpu setup failed: %d\n", DRIVER_NAME,
- ret);
- goto out_destroy_iremap;
- }
-
- ret = abrolhos_parse_ssmt(abpdev);
- if (ret)
- dev_warn(
- dev,
- "SSMT setup failed (%d). Context isolation not enforced\n",
- ret);
-
- abrolhos_get_telemetry_mem(abpdev, EDGETPU_TELEMETRY_LOG,
- &abpdev->log_mem);
- abrolhos_get_telemetry_mem(abpdev, EDGETPU_TELEMETRY_TRACE,
- &abpdev->trace_mem);
-
- ret = edgetpu_telemetry_init(&abpdev->edgetpu_dev, &abpdev->log_mem,
- &abpdev->trace_mem);
- if (ret)
- goto out_remove_device;
-
- ret = mobile_edgetpu_firmware_create(&abpdev->edgetpu_dev);
- if (ret) {
- dev_err(dev,
- "%s initialize firmware downloader failed: %d\n",
- DRIVER_NAME, ret);
- goto out_tel_exit;
- }
-
- dev_dbg(dev, "Creating thermal device\n");
- abpdev->edgetpu_dev.thermal =
- devm_tpu_thermal_create(dev, &abpdev->edgetpu_dev);
-
- dev_info(dev, "%s edgetpu initialized. Build: %s\n",
- abpdev->edgetpu_dev.dev_name, GIT_REPO_TAG);
-
- dev_dbg(dev, "Probe finished, powering down\n");
- /* Turn the device off unless a client request is already received. */
- edgetpu_pm_shutdown(&abpdev->edgetpu_dev, false);
-
- abpdev->sscd_info.pdata = &sscd_pdata;
- abpdev->sscd_info.dev = &sscd_dev;
-
- return ret;
-out_tel_exit:
- edgetpu_telemetry_exit(&abpdev->edgetpu_dev);
-out_remove_device:
- edgetpu_device_remove(&abpdev->edgetpu_dev);
-out_destroy_iremap:
- edgetpu_iremap_pool_destroy(&abpdev->edgetpu_dev);
-out_cleanup_fw:
- edgetpu_platform_cleanup_fw_region(abpdev);
-out_shutdown:
- dev_dbg(dev, "Probe finished with error %d, powering down\n", ret);
- edgetpu_pm_shutdown(&abpdev->edgetpu_dev, true);
- return ret;
+ etmdev = &abpdev->mobile_dev;
+ return edgetpu_mobile_platform_probe(pdev, etmdev);
}
static int edgetpu_platform_remove(struct platform_device *pdev)
{
- struct edgetpu_dev *etdev = platform_get_drvdata(pdev);
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
-
- mobile_edgetpu_firmware_destroy(etdev);
- if (abpdev->irq >= 0)
- edgetpu_unregister_irq(etdev, abpdev->irq);
-
- edgetpu_pm_get(etdev->pm);
- edgetpu_telemetry_exit(etdev);
- edgetpu_device_remove(etdev);
- edgetpu_iremap_pool_destroy(etdev);
- edgetpu_platform_cleanup_fw_region(abpdev);
- edgetpu_pm_put(etdev->pm);
- edgetpu_pm_shutdown(etdev, true);
- abrolhos_pm_destroy(etdev);
- return 0;
+ return edgetpu_mobile_platform_remove(pdev);
}
#if IS_ENABLED(CONFIG_PM_SLEEP)
@@ -393,19 +87,12 @@ static int __init edgetpu_platform_init(void)
ret = edgetpu_init();
if (ret)
return ret;
-
- /* Register SSCD platform device */
- ret = platform_device_register(&sscd_dev);
- if (ret)
- pr_err(DRIVER_NAME " SSCD platform device registration failed: %d\n",
- ret);
return platform_driver_register(&edgetpu_platform_driver);
}
static void __exit edgetpu_platform_exit(void)
{
platform_driver_unregister(&edgetpu_platform_driver);
- platform_device_unregister(&sscd_dev);
edgetpu_exit();
}
diff --git a/drivers/edgetpu/abrolhos-platform.h b/drivers/edgetpu/abrolhos-platform.h
index 068a23f..7ed88ed 100644
--- a/drivers/edgetpu/abrolhos-platform.h
+++ b/drivers/edgetpu/abrolhos-platform.h
@@ -4,61 +4,21 @@
*
* Copyright (C) 2019 Google, Inc.
*/
+
#ifndef __ABROLHOS_PLATFORM_H__
#define __ABROLHOS_PLATFORM_H__
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <soc/google/bcl.h>
-#include <soc/google/bts.h>
-#include <soc/google/exynos_pm_qos.h>
-
-#include "abrolhos-debug-dump.h"
-#include "abrolhos-pm.h"
#include "edgetpu-internal.h"
+#include "edgetpu-mobile-platform.h"
+#include "mobile-debug-dump.h"
-#define to_abrolhos_dev(etdev) \
- container_of(etdev, struct abrolhos_platform_dev, edgetpu_dev)
-
-struct abrolhos_platform_pwr {
- struct mutex policy_lock;
- enum tpu_pwr_state curr_policy;
- struct mutex state_lock;
- u64 min_state;
- u64 requested_state;
- /* INT/MIF requests for memory bandwidth */
- struct exynos_pm_qos_request int_min;
- struct exynos_pm_qos_request mif_min;
- /* BTS */
- unsigned int performance_scenario;
- int scenario_count;
- struct mutex scenario_lock;
-};
+#define to_abrolhos_dev(etdev) \
+ container_of((to_mobile_dev(etdev)), struct abrolhos_platform_dev, mobile_dev)
struct abrolhos_platform_dev {
- struct edgetpu_dev edgetpu_dev;
- struct abrolhos_platform_pwr platform_pwr;
- int irq;
- phys_addr_t fw_region_paddr;
- size_t fw_region_size;
- void *shared_mem_vaddr;
- phys_addr_t shared_mem_paddr;
- size_t shared_mem_size;
- struct device *gsa_dev;
- void __iomem *ssmt_base;
- struct edgetpu_coherent_mem log_mem;
- struct edgetpu_coherent_mem trace_mem;
- struct abrolhos_sscd_info sscd_info;
-#if IS_ENABLED(CONFIG_GOOGLE_BCL)
- struct bcl_device *bcl_dev;
-#endif
- /* Protects TZ Mailbox client pointer */
- struct mutex tz_mailbox_lock;
- /* TZ mailbox client */
- struct edgetpu_client *secure_client;
+ struct edgetpu_mobile_platform_dev mobile_dev;
+ /* subsystem coredump info struct */
+ struct mobile_sscd_info sscd_info;
};
#endif /* __ABROLHOS_PLATFORM_H__ */
diff --git a/drivers/edgetpu/abrolhos-pm.c b/drivers/edgetpu/abrolhos-pm.c
index efbd15c..8bf40e2 100644
--- a/drivers/edgetpu/abrolhos-pm.c
+++ b/drivers/edgetpu/abrolhos-pm.c
@@ -5,379 +5,19 @@
* Copyright (C) 2020 Google, Inc.
*/
-#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/gsa/gsa_tpu.h>
-#include <linux/module.h>
-#include <linux/pm_runtime.h>
-#include <soc/google/bcl.h>
-#include <soc/google/bts.h>
-#include <soc/google/exynos_pm_qos.h>
+#include <linux/iopoll.h>
-#include "abrolhos-platform.h"
-#include "abrolhos-pm.h"
+#include "edgetpu-config.h"
#include "edgetpu-firmware.h"
#include "edgetpu-internal.h"
-#include "edgetpu-kci.h"
-#include "edgetpu-mailbox.h"
#include "edgetpu-pm.h"
+#include "mobile-pm.h"
-#include "edgetpu-pm.c"
+#define TPU_DEFAULT_POWER_STATE TPU_DEEP_SLEEP_CLOCKS_SLOW
-/*
- * Encode INT/MIF values as a 16 bit pair in the 32-bit return value
- * (in units of MHz, to provide enough range)
- */
-#define PM_QOS_INT_SHIFT (16)
-#define PM_QOS_MIF_MASK (0xFFFF)
-#define PM_QOS_FACTOR (1000)
-
-/* Default power state: the lowest power state that keeps firmware running */
-static int power_state = TPU_DEEP_SLEEP_CLOCKS_SLOW;
-
-module_param(power_state, int, 0660);
-
-#define MAX_VOLTAGE_VAL 1250000
-
-static struct dentry *abrolhos_pwr_debugfs_dir;
-
-static int abrolhos_pwr_state_init(struct device *dev)
-{
- int ret;
- int curr_state;
-
- pm_runtime_enable(dev);
- curr_state = exynos_acpm_get_rate(TPU_ACPM_DOMAIN, 0);
-
- if (curr_state > TPU_OFF) {
- ret = pm_runtime_get_sync(dev);
- if (ret) {
- dev_err(dev, "pm_runtime_get_sync returned %d\n", ret);
- return ret;
- }
- }
-
- ret = exynos_acpm_set_init_freq(TPU_ACPM_DOMAIN, curr_state);
- if (ret) {
- dev_err(dev, "error initializing tpu state: %d\n", ret);
- if (curr_state > TPU_OFF)
- pm_runtime_put_sync(dev);
- return ret;
- }
-
- return ret;
-}
-
-static int abrolhos_pwr_state_set_locked(void *data, u64 val)
-{
- int ret;
- int curr_state;
- struct edgetpu_dev *etdev = (typeof(etdev))data;
- struct device *dev = etdev->dev;
-
- curr_state = exynos_acpm_get_rate(TPU_ACPM_DOMAIN, 0);
-
- dev_dbg(dev, "Power state %d -> %llu\n", curr_state, val);
-
- if (curr_state == TPU_OFF && val > TPU_OFF) {
- ret = pm_runtime_get_sync(dev);
- if (ret) {
- dev_err(dev, "pm_runtime_get_sync returned %d\n", ret);
- return ret;
- }
- }
-
- ret = exynos_acpm_set_rate(TPU_ACPM_DOMAIN, (unsigned long)val);
- if (ret) {
- dev_err(dev, "error setting tpu state: %d\n", ret);
- pm_runtime_put_sync(dev);
- return ret;
- }
-
- if (curr_state != TPU_OFF && val == TPU_OFF) {
- ret = pm_runtime_put_sync(dev);
- if (ret) {
- dev_err(dev, "%s: pm_runtime_put_sync returned %d\n",
- __func__, ret);
- return ret;
- }
- }
-
- return ret;
-}
-
-static int abrolhos_pwr_state_get_locked(void *data, u64 *val)
-{
- struct edgetpu_dev *etdev = (typeof(etdev))data;
- struct device *dev = etdev->dev;
-
- *val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN, 0);
- dev_dbg(dev, "current tpu state: %llu\n", *val);
-
- return 0;
-}
-
-static int abrolhos_pwr_state_set(void *data, u64 val)
-{
- struct edgetpu_dev *etdev = (typeof(etdev))data;
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
- struct abrolhos_platform_pwr *platform_pwr = &abpdev->platform_pwr;
- int ret = 0;
-
- mutex_lock(&platform_pwr->state_lock);
- platform_pwr->requested_state = val;
- if (val >= platform_pwr->min_state)
- ret = abrolhos_pwr_state_set_locked(etdev, val);
- mutex_unlock(&platform_pwr->state_lock);
- return ret;
-}
-
-static int abrolhos_pwr_state_get(void *data, u64 *val)
-{
- struct edgetpu_dev *etdev = (typeof(etdev))data;
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
- struct abrolhos_platform_pwr *platform_pwr = &abpdev->platform_pwr;
- int ret;
-
- mutex_lock(&platform_pwr->state_lock);
- ret = abrolhos_pwr_state_get_locked(etdev, val);
- mutex_unlock(&platform_pwr->state_lock);
- return ret;
-}
-
-static int abrolhos_min_pwr_state_set(void *data, u64 val)
-{
- struct edgetpu_dev *etdev = (typeof(etdev))data;
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
- struct abrolhos_platform_pwr *platform_pwr = &abpdev->platform_pwr;
- int ret = 0;
-
- mutex_lock(&platform_pwr->state_lock);
- platform_pwr->min_state = val;
- if (val >= platform_pwr->requested_state)
- ret = abrolhos_pwr_state_set_locked(etdev, val);
- mutex_unlock(&platform_pwr->state_lock);
- return ret;
-}
-
-static int abrolhos_min_pwr_state_get(void *data, u64 *val)
-{
- struct edgetpu_dev *etdev = (typeof(etdev))data;
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
- struct abrolhos_platform_pwr *platform_pwr = &abpdev->platform_pwr;
-
- mutex_lock(&platform_pwr->state_lock);
- *val = platform_pwr->min_state;
- mutex_unlock(&platform_pwr->state_lock);
- return 0;
-}
-
-static int abrolhos_pwr_policy_set(void *data, u64 val)
-{
- struct abrolhos_platform_dev *abpdev = (typeof(abpdev))data;
- struct abrolhos_platform_pwr *platform_pwr = &abpdev->platform_pwr;
- int ret;
-
- mutex_lock(&platform_pwr->policy_lock);
- ret = exynos_acpm_set_policy(TPU_ACPM_DOMAIN, val);
-
- if (ret) {
- dev_err(abpdev->edgetpu_dev.dev,
- "unable to set policy %lld (ret %d)\n", val, ret);
- mutex_unlock(&platform_pwr->policy_lock);
- return ret;
- }
-
- platform_pwr->curr_policy = val;
- mutex_unlock(&platform_pwr->policy_lock);
- return 0;
-}
-
-static int abrolhos_pwr_policy_get(void *data, u64 *val)
-{
- struct abrolhos_platform_dev *abpdev = (typeof(abpdev))data;
- struct abrolhos_platform_pwr *platform_pwr = &abpdev->platform_pwr;
-
- mutex_lock(&platform_pwr->policy_lock);
- *val = platform_pwr->curr_policy;
- mutex_unlock(&platform_pwr->policy_lock);
-
- return 0;
-}
-
-DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_pwr_policy, abrolhos_pwr_policy_get,
- abrolhos_pwr_policy_set, "%llu\n");
-
-DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_pwr_state, abrolhos_pwr_state_get,
- abrolhos_pwr_state_set, "%llu\n");
-
-DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_min_pwr_state, abrolhos_min_pwr_state_get,
- abrolhos_min_pwr_state_set, "%llu\n");
-
-static int edgetpu_core_rate_get(void *data, u64 *val)
-{
- *val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN,
- TPU_DEBUG_REQ | TPU_CLK_CORE_DEBUG);
- return 0;
-}
-
-static int edgetpu_core_rate_set(void *data, u64 val)
-{
- unsigned long dbg_rate_req;
-
- dbg_rate_req = TPU_DEBUG_REQ | TPU_CLK_CORE_DEBUG;
- dbg_rate_req |= val;
-
- return exynos_acpm_set_rate(TPU_ACPM_DOMAIN, dbg_rate_req);
-}
-
-DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_core_rate, edgetpu_core_rate_get,
- edgetpu_core_rate_set, "%llu\n");
-
-static int edgetpu_ctl_rate_get(void *data, u64 *val)
-{
- *val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN,
- TPU_DEBUG_REQ | TPU_CLK_CTL_DEBUG);
- return 0;
-}
-
-static int edgetpu_ctl_rate_set(void *data, u64 val)
-{
- unsigned long dbg_rate_req;
-
- dbg_rate_req = TPU_DEBUG_REQ | TPU_CLK_CTL_DEBUG;
- dbg_rate_req |= 1000;
-
- return exynos_acpm_set_rate(TPU_ACPM_DOMAIN, dbg_rate_req);
-}
-
-DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_ctl_rate, edgetpu_ctl_rate_get,
- edgetpu_ctl_rate_set, "%llu\n");
-
-static int edgetpu_axi_rate_get(void *data, u64 *val)
-{
- *val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN,
- TPU_DEBUG_REQ | TPU_CLK_AXI_DEBUG);
- return 0;
-}
-
-static int edgetpu_axi_rate_set(void *data, u64 val)
-{
- unsigned long dbg_rate_req;
-
- dbg_rate_req = TPU_DEBUG_REQ | TPU_CLK_AXI_DEBUG;
- dbg_rate_req |= 1000;
-
- return exynos_acpm_set_rate(TPU_ACPM_DOMAIN, dbg_rate_req);
-}
-
-DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_axi_rate, edgetpu_axi_rate_get,
- edgetpu_axi_rate_set, "%llu\n");
-
-static int edgetpu_apb_rate_get(void *data, u64 *val)
-{
- *val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN,
- TPU_DEBUG_REQ | TPU_CLK_APB_DEBUG);
- return 0;
-}
-
-DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_apb_rate, edgetpu_apb_rate_get, NULL,
- "%llu\n");
-
-static int edgetpu_uart_rate_get(void *data, u64 *val)
-{
- *val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN,
- TPU_DEBUG_REQ | TPU_CLK_UART_DEBUG);
- return 0;
-}
-
-DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_uart_rate, edgetpu_uart_rate_get, NULL,
- "%llu\n");
-
-static int edgetpu_vdd_int_m_set(void *data, u64 val)
-{
- struct device *dev = (struct device *)data;
- unsigned long dbg_rate_req;
-
- if (val > MAX_VOLTAGE_VAL) {
- dev_err(dev, "Preventing INT_M voltage > %duV",
- MAX_VOLTAGE_VAL);
- return -EINVAL;
- }
-
- dbg_rate_req = TPU_DEBUG_REQ | TPU_VDD_INT_M_DEBUG;
- dbg_rate_req |= val;
-
- return exynos_acpm_set_rate(TPU_ACPM_DOMAIN, dbg_rate_req);
-}
-
-static int edgetpu_vdd_int_m_get(void *data, u64 *val)
-{
- *val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN,
- TPU_DEBUG_REQ | TPU_VDD_INT_M_DEBUG);
- return 0;
-}
-
-DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_vdd_int_m, edgetpu_vdd_int_m_get,
- edgetpu_vdd_int_m_set, "%llu\n");
-
-static int edgetpu_vdd_tpu_set(void *data, u64 val)
-{
- int ret;
- struct device *dev = (struct device *)data;
- unsigned long dbg_rate_req;
-
- if (val > MAX_VOLTAGE_VAL) {
- dev_err(dev, "Preventing VDD_TPU voltage > %duV",
- MAX_VOLTAGE_VAL);
- return -EINVAL;
- }
-
- dbg_rate_req = TPU_DEBUG_REQ | TPU_VDD_TPU_DEBUG;
- dbg_rate_req |= val;
-
- ret = exynos_acpm_set_rate(TPU_ACPM_DOMAIN, dbg_rate_req);
- return ret;
-}
-
-static int edgetpu_vdd_tpu_get(void *data, u64 *val)
-{
- *val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN,
- TPU_DEBUG_REQ | TPU_VDD_TPU_DEBUG);
- return 0;
-}
-
-DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_vdd_tpu, edgetpu_vdd_tpu_get,
- edgetpu_vdd_tpu_set, "%llu\n");
-
-static int edgetpu_vdd_tpu_m_set(void *data, u64 val)
-{
- int ret;
- struct device *dev = (struct device *)data;
- unsigned long dbg_rate_req;
-
- if (val > MAX_VOLTAGE_VAL) {
- dev_err(dev, "Preventing VDD_TPU voltage > %duV",
- MAX_VOLTAGE_VAL);
- return -EINVAL;
- }
-
- dbg_rate_req = TPU_DEBUG_REQ | TPU_VDD_TPU_M_DEBUG;
- dbg_rate_req |= val;
-
- ret = exynos_acpm_set_rate(TPU_ACPM_DOMAIN, dbg_rate_req);
- return ret;
-}
-
-static int edgetpu_vdd_tpu_m_get(void *data, u64 *val)
-{
- *val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN,
- TPU_DEBUG_REQ | TPU_VDD_TPU_M_DEBUG);
- return 0;
-}
-
-DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_vdd_tpu_m, edgetpu_vdd_tpu_m_get,
- edgetpu_vdd_tpu_m_set, "%llu\n");
+#include "mobile-pm.c"
static int abrolhos_core_pwr_get(void *data, u64 *val)
{
@@ -401,111 +41,20 @@ static int abrolhos_core_pwr_set(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_core_pwr, abrolhos_core_pwr_get,
abrolhos_core_pwr_set, "%llu\n");
-static int abrolhos_get_initial_pwr_state(struct device *dev)
-{
- switch (power_state) {
- case TPU_DEEP_SLEEP_CLOCKS_SLOW:
- case TPU_DEEP_SLEEP_CLOCKS_FAST:
- case TPU_RETENTION_CLOCKS_SLOW:
- case TPU_ACTIVE_UUD:
- case TPU_ACTIVE_SUD:
- case TPU_ACTIVE_UD:
- case TPU_ACTIVE_NOM:
- case TPU_ACTIVE_OD:
- dev_info(dev, "Initial power state: %d\n", power_state);
- break;
- case TPU_OFF:
- case TPU_DEEP_SLEEP_CLOCKS_OFF:
- case TPU_SLEEP_CLOCKS_OFF:
- dev_warn(dev, "Power state %d prevents control core booting",
- power_state);
- /* fall-thru */
- default:
- dev_warn(dev, "Power state %d is invalid\n", power_state);
- dev_warn(dev, "defaulting to active nominal\n");
- power_state = TPU_ACTIVE_NOM;
- break;
- }
- return power_state;
-}
-
-static void abrolhos_power_down(struct edgetpu_pm *etpm);
-
-static int abrolhos_power_up(struct edgetpu_pm *etpm)
+static int abrolhos_pm_after_create(struct edgetpu_dev *etdev)
{
- struct edgetpu_dev *etdev = etpm->etdev;
-#if IS_ENABLED(CONFIG_GOOGLE_BCL)
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
-#endif
- int ret = abrolhos_pwr_state_set(
- etpm->etdev, abrolhos_get_initial_pwr_state(etdev->dev));
-
- etdev_info(etpm->etdev, "Powering up\n");
-
- if (ret)
- return ret;
+ struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+ struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
- edgetpu_chip_init(etdev);
+ debugfs_create_file("core_pwr", 0660, platform_pwr->debugfs_dir, etdev, &fops_tpu_core_pwr);
- if (etdev->kci) {
- etdev_dbg(etdev, "Resetting KCI\n");
- edgetpu_kci_reinit(etdev->kci);
- }
- if (etdev->mailbox_manager) {
- etdev_dbg(etdev, "Resetting VII mailboxes\n");
- edgetpu_mailbox_reset_vii(etdev->mailbox_manager);
- }
-
- if (!etdev->firmware)
- return 0;
-
- /*
- * Why this function uses edgetpu_firmware_*_locked functions without explicitly holding
- * edgetpu_firmware_lock:
- *
- * edgetpu_pm_get() is called in two scenarios - one is when the firmware loading is
- * attempting, another one is when the user-space clients need the device be powered
- * (usually through acquiring the wakelock).
- *
- * For the first scenario edgetpu_firmware_is_loading() below shall return true.
- * For the second scenario we are indeed called without holding the firmware lock, but the
- * firmware loading procedures (i.e. the first scenario) always call edgetpu_pm_get() before
- * changing the firmware state, and edgetpu_pm_get() is blocked until this function
- * finishes. In short, we are protected by the PM lock.
- */
-
- if (edgetpu_firmware_is_loading(etdev))
- return 0;
-
- /* attempt firmware run */
- switch (edgetpu_firmware_status_locked(etdev)) {
- case FW_VALID:
- ret = edgetpu_firmware_restart_locked(etdev, false);
- break;
- case FW_INVALID:
- ret = edgetpu_firmware_run_default_locked(etdev);
- break;
- default:
- break;
- }
- if (ret) {
- abrolhos_power_down(etpm);
- } else {
-#if IS_ENABLED(CONFIG_GOOGLE_BCL)
- if (!abpdev->bcl_dev)
- abpdev->bcl_dev = google_retrieve_bcl_handle();
- if (abpdev->bcl_dev)
- google_init_tpu_ratio(abpdev->bcl_dev);
-#endif
- }
-
- return ret;
+ return 0;
}
-static void
-abrolhos_pm_shutdown_firmware(struct abrolhos_platform_dev *etpdev,
- struct edgetpu_dev *etdev)
+static void abrolhos_firmware_down(struct edgetpu_dev *etdev)
{
+ struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+
if (!edgetpu_pchannel_power_down(etdev, false))
return;
@@ -515,272 +64,26 @@ abrolhos_pm_shutdown_firmware(struct abrolhos_platform_dev *etpdev,
!edgetpu_pchannel_power_down(etdev, false))
return;
- etdev_warn(etdev, "Requesting early GSA reset\n");
-
- /*
- * p-channel failed, request GSA shutdown to make sure the CPU is
- * reset.
- * The GSA->APM request will clear any pending DVFS status from the
- * CPU.
- */
- gsa_send_tpu_cmd(etpdev->gsa_dev, GSA_TPU_SHUTDOWN);
-}
-
-static void abrolhos_pm_cleanup_bts_scenario(struct edgetpu_dev *etdev)
-{
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
- struct abrolhos_platform_pwr *platform_pwr = &abpdev->platform_pwr;
- int performance_scenario = platform_pwr->performance_scenario;
-
- if (!performance_scenario)
- return;
-
- mutex_lock(&platform_pwr->scenario_lock);
- while (platform_pwr->scenario_count) {
- int ret = bts_del_scenario(performance_scenario);
-
- if (ret) {
- platform_pwr->scenario_count = 0;
- etdev_warn_once(
- etdev,
- "error %d in cleaning up BTS scenario %u\n",
- ret, performance_scenario);
- break;
- }
- platform_pwr->scenario_count--;
- }
- mutex_unlock(&platform_pwr->scenario_lock);
-}
-
-static void abrolhos_power_down(struct edgetpu_pm *etpm)
-{
- struct edgetpu_dev *etdev = etpm->etdev;
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
- struct abrolhos_platform_pwr *platform_pwr = &abpdev->platform_pwr;
- u64 val;
- int res;
- int min_state = platform_pwr->min_state;
-
- etdev_info(etdev, "Powering down\n");
-
- if (min_state >= TPU_DEEP_SLEEP_CLOCKS_SLOW) {
- etdev_info(etdev, "Power down skipped due to min state = %d\n",
- min_state);
- return;
- }
-
- if (abrolhos_pwr_state_get(etdev, &val)) {
- etdev_warn(etdev, "Failed to read current power state\n");
- val = TPU_ACTIVE_NOM;
- }
- if (val == TPU_OFF) {
- etdev_dbg(etdev, "Device already off, skipping shutdown\n");
- return;
- }
-
- if (etdev->kci && edgetpu_firmware_status_locked(etdev) == FW_VALID) {
- /* Update usage stats before we power off fw. */
- edgetpu_kci_update_usage_locked(etdev);
- abrolhos_pm_shutdown_firmware(abpdev, etdev);
- edgetpu_kci_cancel_work_queues(etdev->kci);
+ if (etmdev->gsa_dev) {
+ etdev_warn(etdev, "Requesting early GSA reset\n");
+ /*
+ * p-channel failed, request GSA shutdown to make sure the CPU is
+ * reset.
+ * The GSA->APM request will clear any pending DVFS status from the
+ * CPU.
+ */
+ gsa_send_tpu_cmd(etmdev->gsa_dev, GSA_TPU_SHUTDOWN);
}
-
- res = gsa_send_tpu_cmd(abpdev->gsa_dev, GSA_TPU_SHUTDOWN);
- if (res < 0)
- etdev_warn(etdev, "GSA shutdown request failed (%d)\n", res);
- abrolhos_pwr_state_set(etdev, TPU_OFF);
-
- /* Remove our vote for INT/MIF state (if any) */
- exynos_pm_qos_update_request(&platform_pwr->int_min, 0);
- exynos_pm_qos_update_request(&platform_pwr->mif_min, 0);
-
- abrolhos_pm_cleanup_bts_scenario(etdev);
-
- /*
- * It should be impossible that power_down() is called when abpdev->secure_client is set.
- * Non-null secure_client implies ext mailbox is acquired, which implies wakelock is
- * acquired.
- * Clear the state here just in case.
- */
- abpdev->secure_client = NULL;
-}
-
-static int abrolhos_pm_after_create(struct edgetpu_pm *etpm)
-{
- int ret;
- struct edgetpu_dev *etdev = etpm->etdev;
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
- struct device *dev = etdev->dev;
- struct abrolhos_platform_pwr *platform_pwr = &abpdev->platform_pwr;
-
- ret = abrolhos_pwr_state_init(dev);
- if (ret)
- return ret;
-
- mutex_init(&platform_pwr->policy_lock);
- mutex_init(&platform_pwr->state_lock);
- mutex_init(&platform_pwr->scenario_lock);
-
- exynos_pm_qos_add_request(&platform_pwr->int_min,
- PM_QOS_DEVICE_THROUGHPUT, 0);
- exynos_pm_qos_add_request(&platform_pwr->mif_min, PM_QOS_BUS_THROUGHPUT,
- 0);
-
- platform_pwr->performance_scenario =
- bts_get_scenindex("tpu_performance");
- if (!platform_pwr->performance_scenario)
- etdev_warn(etdev, "tpu_performance BTS scenario not found\n");
- platform_pwr->scenario_count = 0;
-
- ret = abrolhos_pwr_state_set(etdev,
- abrolhos_get_initial_pwr_state(dev));
- if (ret)
- return ret;
- abrolhos_pwr_debugfs_dir =
- debugfs_create_dir("power", edgetpu_fs_debugfs_dir());
- if (IS_ERR_OR_NULL(abrolhos_pwr_debugfs_dir)) {
- etdev_warn(etdev, "Failed to create debug FS power");
- /* don't fail the procedure on debug FS creation fails */
- return 0;
- }
- debugfs_create_file("state", 0660, abrolhos_pwr_debugfs_dir, etdev,
- &fops_tpu_pwr_state);
- debugfs_create_file("min_state", 0660, abrolhos_pwr_debugfs_dir, etdev,
- &fops_tpu_min_pwr_state);
- debugfs_create_file("vdd_tpu", 0660, abrolhos_pwr_debugfs_dir, dev,
- &fops_tpu_vdd_tpu);
- debugfs_create_file("vdd_tpu_m", 0660, abrolhos_pwr_debugfs_dir, dev,
- &fops_tpu_vdd_tpu_m);
- debugfs_create_file("vdd_int_m", 0660, abrolhos_pwr_debugfs_dir, dev,
- &fops_tpu_vdd_int_m);
- debugfs_create_file("core_rate", 0660, abrolhos_pwr_debugfs_dir, dev,
- &fops_tpu_core_rate);
- debugfs_create_file("ctl_rate", 0660, abrolhos_pwr_debugfs_dir, dev,
- &fops_tpu_ctl_rate);
- debugfs_create_file("axi_rate", 0660, abrolhos_pwr_debugfs_dir, dev,
- &fops_tpu_axi_rate);
- debugfs_create_file("apb_rate", 0440, abrolhos_pwr_debugfs_dir, dev,
- &fops_tpu_apb_rate);
- debugfs_create_file("uart_rate", 0440, abrolhos_pwr_debugfs_dir, dev,
- &fops_tpu_uart_rate);
- debugfs_create_file("policy", 0660, abrolhos_pwr_debugfs_dir,
- abpdev, &fops_tpu_pwr_policy);
- debugfs_create_file("core_pwr", 0660, abrolhos_pwr_debugfs_dir,
- abpdev, &fops_tpu_core_pwr);
-
- return 0;
}
-static void abrolhos_pm_before_destroy(struct edgetpu_pm *etpm)
+int edgetpu_chip_pm_create(struct edgetpu_dev *etdev)
{
- struct edgetpu_dev *etdev = etpm->etdev;
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
- struct abrolhos_platform_pwr *platform_pwr = &abpdev->platform_pwr;
+ struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+ struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
- debugfs_remove_recursive(abrolhos_pwr_debugfs_dir);
- pm_runtime_disable(etpm->etdev->dev);
- abrolhos_pm_cleanup_bts_scenario(etdev);
- exynos_pm_qos_remove_request(&platform_pwr->int_min);
- exynos_pm_qos_remove_request(&platform_pwr->mif_min);
-}
-
-static struct edgetpu_pm_handlers abrolhos_pm_handlers = {
- .after_create = abrolhos_pm_after_create,
- .before_destroy = abrolhos_pm_before_destroy,
- .power_up = abrolhos_power_up,
- .power_down = abrolhos_power_down,
-};
-
-int abrolhos_pm_create(struct edgetpu_dev *etdev)
-{
- return edgetpu_pm_create(etdev, &abrolhos_pm_handlers);
-}
-
-void abrolhos_pm_destroy(struct edgetpu_dev *etdev)
-{
- edgetpu_pm_destroy(etdev);
-}
-
-void abrolhos_pm_set_pm_qos(struct edgetpu_dev *etdev, u32 pm_qos_val)
-{
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
- struct abrolhos_platform_pwr *platform_pwr = &abpdev->platform_pwr;
- s32 int_val = (pm_qos_val >> PM_QOS_INT_SHIFT) * PM_QOS_FACTOR;
- s32 mif_val = (pm_qos_val & PM_QOS_MIF_MASK) * PM_QOS_FACTOR;
-
- etdev_dbg(etdev, "%s: pm_qos request - int = %d mif = %d\n", __func__,
- int_val, mif_val);
-
- exynos_pm_qos_update_request(&platform_pwr->int_min, int_val);
- exynos_pm_qos_update_request(&platform_pwr->mif_min, mif_val);
-}
-
-static void abrolhos_pm_activate_bts_scenario(struct edgetpu_dev *etdev)
-{
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
- struct abrolhos_platform_pwr *platform_pwr = &abpdev->platform_pwr;
- int performance_scenario = platform_pwr->performance_scenario;
-
- /* bts_add_scenario() keeps track of reference count internally.*/
- int ret;
+ platform_pwr->firmware_down = abrolhos_firmware_down;
+ platform_pwr->after_create = abrolhos_pm_after_create;
+ platform_pwr->acpm_set_rate = exynos_acpm_set_rate;
- if (!performance_scenario)
- return;
- mutex_lock(&platform_pwr->scenario_lock);
- ret = bts_add_scenario(performance_scenario);
- if (ret)
- etdev_warn_once(etdev, "error %d adding BTS scenario %u\n", ret,
- performance_scenario);
- else
- platform_pwr->scenario_count++;
-
- etdev_dbg(etdev, "BTS Scenario activated: %d\n",
- platform_pwr->scenario_count);
- mutex_unlock(&platform_pwr->scenario_lock);
-}
-
-static void abrolhos_pm_deactivate_bts_scenario(struct edgetpu_dev *etdev)
-{
- /* bts_del_scenario() keeps track of reference count internally.*/
- int ret;
- struct abrolhos_platform_dev *abpdev = to_abrolhos_dev(etdev);
- struct abrolhos_platform_pwr *platform_pwr = &abpdev->platform_pwr;
- int performance_scenario = platform_pwr->performance_scenario;
-
- if (!performance_scenario)
- return;
- mutex_lock(&platform_pwr->scenario_lock);
- if (!platform_pwr->scenario_count) {
- etdev_warn(etdev, "Unbalanced bts deactivate\n");
- mutex_unlock(&platform_pwr->scenario_lock);
- return;
- }
- ret = bts_del_scenario(performance_scenario);
- if (ret)
- etdev_warn_once(etdev, "error %d deleting BTS scenario %u\n",
- ret, performance_scenario);
- else
- platform_pwr->scenario_count--;
-
- etdev_dbg(etdev, "BTS Scenario deactivated: %d\n",
- platform_pwr->scenario_count);
- mutex_unlock(&platform_pwr->scenario_lock);
-}
-
-void abrolhos_pm_set_bts(struct edgetpu_dev *etdev, u32 bts_val)
-{
- etdev_dbg(etdev, "%s: bts request - val = %u\n", __func__, bts_val);
-
- switch (bts_val) {
- case 0:
- abrolhos_pm_deactivate_bts_scenario(etdev);
- break;
- case 1:
- abrolhos_pm_activate_bts_scenario(etdev);
- break;
- default:
- etdev_warn(etdev, "%s: invalid BTS request value: %u\n",
- __func__, bts_val);
- break;
- }
+ return mobile_pm_create(etdev);
}
diff --git a/drivers/edgetpu/abrolhos-thermal.c b/drivers/edgetpu/abrolhos-thermal.c
index 82a8a5a..deb763f 100644
--- a/drivers/edgetpu/abrolhos-thermal.c
+++ b/drivers/edgetpu/abrolhos-thermal.c
@@ -1,454 +1,2 @@
// SPDX-License-Identifier: GPL-2.0
-/*
- * EdgeTPU thermal driver for Abrolhos.
- *
- * Copyright (C) 2020 Google, Inc.
- */
-
-#include <linux/debugfs.h>
-#include <linux/device.h>
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/thermal.h>
-#include <linux/version.h>
-#include <soc/google/gs101_tmu.h>
-
-#include "abrolhos-platform.h"
-#include "abrolhos-pm.h"
-#include "edgetpu-config.h"
-#include "edgetpu-internal.h"
-#include "edgetpu-mmu.h"
-#include "edgetpu-pm.h"
-#include "edgetpu-thermal.h"
-
-#define MAX_NUM_TPU_STATES 10
-#define OF_DATA_NUM_MAX MAX_NUM_TPU_STATES * 2
-static struct edgetpu_state_pwr state_pwr_map[MAX_NUM_TPU_STATES] = {0};
-
-/*
- * Sends the thermal throttling KCI if the device is powered.
- *
- * Returns the return value of KCI if the device is powered, otherwise 0.
- */
-static int edgetpu_thermal_kci_if_powered(struct edgetpu_dev *etdev, enum tpu_pwr_state state)
-{
- int ret = 0;
-
- if (edgetpu_pm_get_if_powered(etdev->pm)) {
- ret = edgetpu_kci_notify_throttling(etdev, state);
- if (ret)
- etdev_err_ratelimited(etdev,
- "Failed to notify FW about power state %u, error:%d",
- state, ret);
- edgetpu_pm_put(etdev->pm);
- }
- return ret;
-}
-
-static int edgetpu_get_max_state(struct thermal_cooling_device *cdev,
- unsigned long *state)
-{
- struct edgetpu_thermal *thermal = cdev->devdata;
-
- if (thermal->tpu_num_states <= 0)
- return -ENOSYS;
-
- *state = thermal->tpu_num_states - 1;
- return 0;
-}
-
-/*
- * Set cooling state.
- */
-static int edgetpu_set_cur_state(struct thermal_cooling_device *cdev,
- unsigned long state_original)
-{
- int ret;
- struct edgetpu_thermal *cooling = cdev->devdata;
- struct device *dev = cooling->dev;
- unsigned long pwr_state;
-
- if (state_original >= cooling->tpu_num_states) {
- dev_err(dev, "%s: invalid cooling state %lu\n", __func__,
- state_original);
- return -EINVAL;
- }
-
- state_original = max(cooling->sysfs_req, state_original);
-
- mutex_lock(&cooling->lock);
- pwr_state = state_pwr_map[state_original].state;
- if (state_original != cooling->cooling_state) {
- /*
- * Set the thermal policy through ACPM to allow cooling by DVFS. Any states lower
- * than UUD should be handled by firmware when it gets the throttling notification
- * KCI
- */
- if (pwr_state < TPU_ACTIVE_UUD) {
- dev_warn_ratelimited(
- dev, "Setting lowest DVFS state, waiting for FW to shutdown TPU");
- ret = exynos_acpm_set_policy(TPU_ACPM_DOMAIN, TPU_ACTIVE_UUD);
- } else {
- ret = exynos_acpm_set_policy(TPU_ACPM_DOMAIN, pwr_state);
- }
-
- if (ret) {
- dev_err(dev, "error setting tpu policy: %d\n", ret);
- goto out;
- }
- cooling->cooling_state = state_original;
- } else {
- ret = -EALREADY;
- }
-
-out:
- mutex_unlock(&cooling->lock);
- return ret;
-}
-
-static int edgetpu_get_cur_state(struct thermal_cooling_device *cdev,
- unsigned long *state)
-{
- int ret = 0;
- struct edgetpu_thermal *cooling = cdev->devdata;
-
- *state = cooling->cooling_state;
- if (*state >= cooling->tpu_num_states) {
- dev_warn(cooling->dev,
- "Unknown cooling state: %lu, resetting\n", *state);
- mutex_lock(&cooling->lock);
-
- ret = exynos_acpm_set_policy(TPU_ACPM_DOMAIN, TPU_ACTIVE_OD);
- if (ret) {
- dev_err(cooling->dev, "error setting tpu policy: %d\n",
- ret);
- mutex_unlock(&cooling->lock);
- return ret;
- }
-
- //setting back to "no cooling"
- cooling->cooling_state = 0;
- mutex_unlock(&cooling->lock);
- }
-
- return 0;
-}
-
-static int edgetpu_state2power_internal(unsigned long state, u32 *power,
- struct edgetpu_thermal *thermal)
-{
- int i;
-
- for (i = 0; i < thermal->tpu_num_states; ++i) {
- if (state == state_pwr_map[i].state) {
- *power = state_pwr_map[i].power;
- return 0;
- }
- }
- dev_err(thermal->dev, "Unknown state req for: %lu\n", state);
- *power = 0;
- return -EINVAL;
-}
-
-static int edgetpu_get_requested_power(struct thermal_cooling_device *cdev,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
- struct thermal_zone_device *tz,
-#endif
- u32 *power)
-{
- unsigned long state_original;
- struct edgetpu_thermal *cooling = cdev->devdata;
-
- state_original = exynos_acpm_get_rate(TPU_ACPM_DOMAIN, 0);
- return edgetpu_state2power_internal(state_original, power,
- cooling);
-}
-
-static int edgetpu_state2power(struct thermal_cooling_device *cdev,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
- struct thermal_zone_device *tz,
-#endif
- unsigned long state, u32 *power)
-{
- struct edgetpu_thermal *cooling = cdev->devdata;
-
- if (state >= cooling->tpu_num_states) {
- dev_err(cooling->dev, "%s: invalid state: %lu\n", __func__,
- state);
- return -EINVAL;
- }
-
- return edgetpu_state2power_internal(state_pwr_map[state].state, power,
- cooling);
-}
-
-static int edgetpu_power2state(struct thermal_cooling_device *cdev,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
- struct thermal_zone_device *tz,
-#endif
- u32 power, unsigned long *state)
-{
- int i, penultimate_throttle_state;
- struct edgetpu_thermal *thermal = cdev->devdata;
-
- *state = 0;
- if (thermal->tpu_num_states < 2)
- return thermal->tpu_num_states == 1 ? 0 : -ENOSYS;
-
- penultimate_throttle_state = thermal->tpu_num_states - 2;
- /*
- * argument "power" is the maximum allowed power consumption in mW as
- * defined by the PID control loop. Check for the first state that is
- * less than or equal to the current allowed power. state_pwr_map is
- * descending, so lowest power consumption is last value in the array
- * return lowest state even if it consumes more power than allowed as
- * not all platforms can handle throttling below an active state
- */
- for (i = penultimate_throttle_state; i >= 0; --i) {
- if (power < state_pwr_map[i].power) {
- *state = i + 1;
- break;
- }
- }
- return 0;
-}
-
-static struct thermal_cooling_device_ops edgetpu_cooling_ops = {
- .get_max_state = edgetpu_get_max_state,
- .get_cur_state = edgetpu_get_cur_state,
- .set_cur_state = edgetpu_set_cur_state,
- .get_requested_power = edgetpu_get_requested_power,
- .state2power = edgetpu_state2power,
- .power2state = edgetpu_power2state,
-};
-
-static void tpu_thermal_exit_cooling(struct edgetpu_thermal *thermal)
-{
- if (!IS_ERR_OR_NULL(thermal->cdev))
- thermal_cooling_device_unregister(thermal->cdev);
-}
-
-static void tpu_thermal_exit(struct edgetpu_thermal *thermal)
-{
- tpu_thermal_exit_cooling(thermal);
- debugfs_remove_recursive(thermal->cooling_root);
-}
-
-static void devm_tpu_thermal_release(struct device *dev, void *res)
-{
- struct edgetpu_thermal *thermal = res;
-
- tpu_thermal_exit(thermal);
-}
-
-static int tpu_thermal_parse_dvfs_table(struct edgetpu_thermal *thermal)
-{
- int row_size, col_size, tbl_size, i;
- int of_data_int_array[OF_DATA_NUM_MAX];
-
- if (of_property_read_u32_array(thermal->dev->of_node,
- "tpu_dvfs_table_size", of_data_int_array, 2 ))
- goto error;
-
- row_size = of_data_int_array[0];
- col_size = of_data_int_array[1];
- tbl_size = row_size * col_size;
- if (row_size > MAX_NUM_TPU_STATES) {
- dev_err(thermal->dev, "too many TPU states\n");
- goto error;
- }
-
- if (tbl_size > OF_DATA_NUM_MAX)
- goto error;
-
- if (of_property_read_u32_array(thermal->dev->of_node,
- "tpu_dvfs_table", of_data_int_array, tbl_size))
- goto error;
-
- thermal->tpu_num_states = row_size;
- for (i = 0; i < row_size; ++i) {
- int idx = col_size * i;
- state_pwr_map[i].state = of_data_int_array[idx];
- state_pwr_map[i].power = of_data_int_array[idx + 1];
- }
-
- return 0;
-
-error:
- dev_err(thermal->dev, "failed to parse DVFS table\n");
- return -EINVAL;
-}
-
-static ssize_t
-user_vote_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct thermal_cooling_device *cdev = container_of(dev, struct thermal_cooling_device, device);
- struct edgetpu_thermal *cooling = cdev->devdata;
-
- if (!cooling)
- return -ENODEV;
-
- return sysfs_emit(buf, "%lu\n", cooling->sysfs_req);
-}
-
-static ssize_t user_vote_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct thermal_cooling_device *cdev = container_of(dev, struct thermal_cooling_device, device);
- struct edgetpu_thermal *cooling = cdev->devdata;
- int ret;
- unsigned long state;
-
- if (!cooling)
- return -ENODEV;
-
- ret = kstrtoul(buf, 0, &state);
- if (ret)
- return ret;
-
- if (state >= cooling->tpu_num_states)
- return -EINVAL;
-
- mutex_lock(&cdev->lock);
- cooling->sysfs_req = state;
- cdev->updated = false;
- mutex_unlock(&cdev->lock);
- thermal_cdev_update(cdev);
- return count;
-}
-
-static DEVICE_ATTR_RW(user_vote);
-
-static int tpu_pause_callback(enum thermal_pause_state action, void *dev)
-{
- int ret = -EINVAL;
-
- if (!dev)
- return ret;
-
- if (action == THERMAL_SUSPEND)
- ret = edgetpu_thermal_suspend(dev);
- else if (action == THERMAL_RESUME)
- ret = edgetpu_thermal_resume(dev);
-
- return ret;
-}
-
-static int
-tpu_thermal_cooling_register(struct edgetpu_thermal *thermal, char *type)
-{
- struct device_node *cooling_node = NULL;
- int err = 0;
-
- thermal->op_data = NULL;
- thermal->tpu_num_states = 0;
-
- err = tpu_thermal_parse_dvfs_table(thermal);
- if (err)
- return err;
-
- mutex_init(&thermal->lock);
- cooling_node = of_find_node_by_name(NULL, "tpu-cooling");
- if (!cooling_node)
- dev_warn(thermal->dev, "failed to find cooling node\n");
- // Initialize the cooling state as 0, means "no cooling"
- thermal->cooling_state = 0;
- thermal->cdev = thermal_of_cooling_device_register(
- cooling_node, type, thermal, &edgetpu_cooling_ops);
- if (IS_ERR(thermal->cdev))
- return PTR_ERR(thermal->cdev);
-
- return device_create_file(&thermal->cdev->device, &dev_attr_user_vote);
-}
-
-static int tpu_thermal_init(struct edgetpu_thermal *thermal, struct device *dev)
-{
- int err;
- struct dentry *d;
-
- d = debugfs_create_dir("cooling", edgetpu_fs_debugfs_dir());
- /* don't let debugfs creation failure abort the init procedure */
- if (IS_ERR_OR_NULL(d))
- dev_warn(dev, "failed to create debug fs for cooling");
- thermal->dev = dev;
- thermal->cooling_root = d;
-
- err = tpu_thermal_cooling_register(thermal, EDGETPU_COOLING_NAME);
- if (err) {
- dev_err(dev, "failed to initialize external cooling\n");
- tpu_thermal_exit(thermal);
- return err;
- }
-
- register_tpu_thermal_pause_cb(tpu_pause_callback, dev);
-
- return 0;
-}
-
-struct edgetpu_thermal
-*devm_tpu_thermal_create(struct device *dev, struct edgetpu_dev *etdev)
-{
- struct edgetpu_thermal *thermal;
- int err;
-
- thermal = devres_alloc(devm_tpu_thermal_release, sizeof(*thermal),
- GFP_KERNEL);
- if (!thermal)
- return ERR_PTR(-ENOMEM);
-
- thermal->etdev = etdev;
- err = tpu_thermal_init(thermal, dev);
- if (err) {
- devres_free(thermal);
- return ERR_PTR(err);
- }
-
- devres_add(dev, thermal);
- return thermal;
-}
-
-int edgetpu_thermal_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct edgetpu_dev *etdev = platform_get_drvdata(pdev);
- struct edgetpu_thermal *cooling = etdev->thermal;
- int ret = 0;
-
- if (IS_ERR(cooling))
- return PTR_ERR(cooling);
- mutex_lock(&cooling->lock);
- /*
- * Always set as suspended even when the FW cannot handle the KCI (it's dead for some
- * unknown reasons) because we still want to prevent the runtime from using TPU.
- */
- cooling->thermal_suspended = true;
- ret = edgetpu_thermal_kci_if_powered(etdev, TPU_OFF);
- mutex_unlock(&cooling->lock);
- return ret;
-}
-
-int edgetpu_thermal_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct edgetpu_dev *etdev = platform_get_drvdata(pdev);
- struct edgetpu_thermal *cooling = etdev->thermal;
- int ret = 0;
-
- if (IS_ERR(cooling))
- return PTR_ERR(cooling);
- mutex_lock(&cooling->lock);
- ret = edgetpu_thermal_kci_if_powered(etdev, state_pwr_map[0].state);
- /*
- * Unlike edgetpu_thermal_suspend(), only set the device is resumed if the FW handled the
- * KCI request.
- */
- if (!ret)
- cooling->thermal_suspended = false;
- mutex_unlock(&cooling->lock);
- return ret;
-}
+#include "mobile-thermal.c"
diff --git a/drivers/edgetpu/abrolhos-pm.h b/drivers/edgetpu/abrolhos/config-pwr-state.h
index 48b65be..2a9a82f 100644
--- a/drivers/edgetpu/abrolhos-pm.h
+++ b/drivers/edgetpu/abrolhos/config-pwr-state.h
@@ -1,43 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Power management header for Abrolhos.
+ * Chip-dependent power configuration and states.
*
- * Copyright (C) 2020 Google, Inc.
+ * Copyright (C) 2021 Google, Inc.
*/
-#ifndef __ABROLHOS_PM_H__
-#define __ABROLHOS_PM_H__
-
-#include "edgetpu-kci.h"
-#include "edgetpu-internal.h"
-
-/* Can't build out of tree with acpm_dvfs unless kernel supports ACPM */
-#if IS_ENABLED(CONFIG_ACPM_DVFS) || IS_ENABLED(CONFIG_EDGETPU_TEST)
-
-#include <linux/acpm_dvfs.h>
-
-#else
-
-static unsigned long exynos_acpm_rate;
-static inline int exynos_acpm_set_rate(unsigned int id, unsigned long rate)
-{
- exynos_acpm_rate = rate;
- return 0;
-}
-static inline int exynos_acpm_set_init_freq(unsigned int dfs_id,
- unsigned long freq)
-{
- return 0;
-}
-static inline unsigned long exynos_acpm_get_rate(unsigned int id,
- unsigned long dbg_val)
-{
- return exynos_acpm_rate;
-}
-static inline int exynos_acpm_set_policy(unsigned int id, unsigned long policy)
-{
- return 0;
-}
-#endif /* IS_ENABLED(CONFIG_ACPM_DVFS) || IS_ENABLED(CONFIG_EDGETPU_TEST) */
+
+#ifndef __ABROLHOS_CONFIG_PWR_STATE_H__
+#define __ABROLHOS_CONFIG_PWR_STATE_H__
/*
* TPU Power States:
@@ -54,7 +23,7 @@ static inline int exynos_acpm_set_policy(unsigned int id, unsigned long policy)
* 1000000: Nominal @1066MHz
* 1200000: Overdrive @1230MHz
*/
-enum tpu_pwr_state {
+enum edgetpu_pwr_state {
TPU_OFF = 0,
TPU_DEEP_SLEEP_CLOCKS_OFF = 1,
TPU_DEEP_SLEEP_CLOCKS_SLOW = 2,
@@ -69,31 +38,18 @@ enum tpu_pwr_state {
TPU_ACTIVE_OD = 1230000,
};
-/*
- * Request codes from firmware
- * Values must match with firmware code base
- */
-enum abrolhos_reverse_kci_code {
- RKCI_CODE_PM_QOS = RKCI_CHIP_CODE_FIRST + 1,
- RKCI_CODE_BTS = RKCI_CHIP_CODE_FIRST + 2,
-};
+#define MIN_ACTIVE_STATE TPU_DEEP_SLEEP_CLOCKS_SLOW
+
+#define EDGETPU_NUM_STATES 5
+
+extern enum edgetpu_pwr_state edgetpu_active_states[];
+
+extern uint32_t *edgetpu_states_display;
#define TPU_POLICY_MAX TPU_ACTIVE_OD
#define TPU_ACPM_DOMAIN 7
-#define TPU_DEBUG_REQ (1 << 31)
-#define TPU_VDD_TPU_DEBUG (0 << 27)
-#define TPU_VDD_TPU_M_DEBUG (1 << 27)
-#define TPU_VDD_INT_M_DEBUG (2 << 27)
-#define TPU_CLK_CORE_DEBUG (3 << 27)
-#define TPU_CLK_CTL_DEBUG (4 << 27)
-#define TPU_CLK_AXI_DEBUG (5 << 27)
-#define TPU_CLK_APB_DEBUG (6 << 27)
-#define TPU_CLK_UART_DEBUG (7 << 27)
-#define TPU_CORE_PWR_DEBUG (8 << 27)
-#define TPU_DEBUG_VALUE_MASK ((1 << 27) - 1)
-
#define OSCCLK_RATE 24576
#define PLL_SHARED0_DIV0 1066000
#define PLL_SHARED1_DIV2 933000
@@ -157,12 +113,4 @@ enum abrolhos_reverse_kci_code {
#define DIV_CMU_RATIO_MASK 0xf
#define DIV_USER_RATIO_MASK 0x7
-int abrolhos_pm_create(struct edgetpu_dev *etdev);
-
-void abrolhos_pm_destroy(struct edgetpu_dev *etdev);
-
-void abrolhos_pm_set_pm_qos(struct edgetpu_dev *etdev, u32 pm_qos_val);
-
-void abrolhos_pm_set_bts(struct edgetpu_dev *etdev, u32 bts_val);
-
-#endif /* __ABROLHOS_PM_H__ */
+#endif /* __ABROLHOS_CONFIG_PWR_STATE_H__ */
diff --git a/drivers/edgetpu/abrolhos/config.h b/drivers/edgetpu/abrolhos/config.h
index bb3cc39..57d6170 100644
--- a/drivers/edgetpu/abrolhos/config.h
+++ b/drivers/edgetpu/abrolhos/config.h
@@ -13,6 +13,8 @@
#define EDGETPU_DEV_MAX 1
#define EDGETPU_HAS_MULTI_GROUPS
+/* 1 context per VII/group plus 1 for KCI */
+#define EDGETPU_NCONTEXTS 8
/* Max number of virtual context IDs that can be allocated for one device. */
#define EDGETPU_NUM_VCIDS 16
/* Reserved VCID that uses the extra partition. */
@@ -67,6 +69,7 @@
#define EDGETPU_DEBUG_DUMP_MEM_SIZE 0x4E0000
#include "config-mailbox.h"
+#include "config-pwr-state.h"
#include "config-tpu-cpu.h"
#include "csrs.h"
diff --git a/drivers/edgetpu/edgetpu-config.h b/drivers/edgetpu/edgetpu-config.h
index a76d8e3..ff1ad77 100644
--- a/drivers/edgetpu/edgetpu-config.h
+++ b/drivers/edgetpu/edgetpu-config.h
@@ -8,14 +8,20 @@
#ifndef __EDGETPU_CONFIG_H__
#define __EDGETPU_CONFIG_H__
-#ifndef CONFIG_ABROLHOS
-#define CONFIG_ABROLHOS
-#warning "Building default chipset abrolhos"
-#endif
-
+#if IS_ENABLED(CONFIG_ABROLHOS)
#include "abrolhos/config.h"
+#else /* unknown */
+
+#error "Unknown EdgeTPU config"
+
+#endif /* unknown */
+
#define EDGETPU_DEFAULT_FIRMWARE_NAME "google/edgetpu-" DRIVER_NAME ".fw"
#define EDGETPU_TEST_FIRMWARE_NAME "google/edgetpu-" DRIVER_NAME "-test.fw"
+#ifndef EDGETPU_NUM_CORES
+#define EDGETPU_NUM_CORES 1
+#endif
+
#endif /* __EDGETPU_CONFIG_H__ */
diff --git a/drivers/edgetpu/edgetpu-core.c b/drivers/edgetpu/edgetpu-core.c
index ff51da1..f4b89ae 100644
--- a/drivers/edgetpu/edgetpu-core.c
+++ b/drivers/edgetpu/edgetpu-core.c
@@ -37,6 +37,12 @@
#include "edgetpu-wakelock.h"
#include "edgetpu.h"
+/* Bits higher than VMA_TYPE_WIDTH are used to carry type specific data, e.g., core id. */
+#define VMA_TYPE_WIDTH 16
+#define VMA_TYPE(x) ((x) & (BIT_MASK(VMA_TYPE_WIDTH) - 1))
+#define VMA_DATA_GET(x) ((x) >> VMA_TYPE_WIDTH)
+#define VMA_DATA_SET(x, y) (VMA_TYPE(x) | ((y) << VMA_TYPE_WIDTH))
+
enum edgetpu_vma_type {
VMA_INVALID,
@@ -44,14 +50,18 @@ enum edgetpu_vma_type {
VMA_VII_CSR,
VMA_VII_CMDQ,
VMA_VII_RESPQ,
+ /* For VMA_LOG and VMA_TRACE, core id is stored in bits higher than VMA_TYPE_WIDTH. */
VMA_LOG,
VMA_TRACE,
};
+/* type that combines enum edgetpu_vma_type and data in higher bits. */
+typedef u32 edgetpu_vma_flags_t;
+
/* structure to be set to vma->vm_private_data on mmap */
struct edgetpu_vma_private {
struct edgetpu_client *client;
- enum edgetpu_vma_type type;
+ edgetpu_vma_flags_t flag;
/*
* vm_private_data is copied when a VMA is split, using this reference
* counter to know when should this object be freed.
@@ -80,7 +90,7 @@ static int edgetpu_mmap_full_csr(struct edgetpu_client *client,
return ret;
}
-static enum edgetpu_vma_type mmap_vma_type(unsigned long pgoff)
+static edgetpu_vma_flags_t mmap_vma_flag(unsigned long pgoff)
{
const unsigned long off = pgoff << PAGE_SHIFT;
@@ -94,9 +104,15 @@ static enum edgetpu_vma_type mmap_vma_type(unsigned long pgoff)
case EDGETPU_MMAP_RESP_QUEUE_OFFSET:
return VMA_VII_RESPQ;
case EDGETPU_MMAP_LOG_BUFFER_OFFSET:
- return VMA_LOG;
+ return VMA_DATA_SET(VMA_LOG, 0);
case EDGETPU_MMAP_TRACE_BUFFER_OFFSET:
- return VMA_TRACE;
+ return VMA_DATA_SET(VMA_TRACE, 0);
+#if EDGETPU_NUM_CORES > 1
+ case EDGETPU_MMAP_LOG1_BUFFER_OFFSET:
+ return VMA_DATA_SET(VMA_LOG, 1);
+ case EDGETPU_MMAP_TRACE1_BUFFER_OFFSET:
+ return VMA_DATA_SET(VMA_TRACE, 1);
+#endif /* EDGETPU_NUM_CORES > 1 */
default:
return VMA_INVALID;
}
@@ -125,14 +141,14 @@ vma_type_to_wakelock_event(enum edgetpu_vma_type type)
static struct edgetpu_vma_private *
edgetpu_vma_private_alloc(struct edgetpu_client *client,
- enum edgetpu_vma_type type)
+ edgetpu_vma_flags_t flag)
{
struct edgetpu_vma_private *pvt = kmalloc(sizeof(*pvt), GFP_KERNEL);
if (!pvt)
return NULL;
pvt->client = edgetpu_client_get(client);
- pvt->type = type;
+ pvt->flag = flag;
refcount_set(&pvt->count, 1);
return pvt;
@@ -159,23 +175,25 @@ static void edgetpu_vma_open(struct vm_area_struct *vma)
enum edgetpu_wakelock_event evt;
struct edgetpu_client *client;
struct edgetpu_dev *etdev;
+ enum edgetpu_vma_type type = VMA_TYPE(pvt->flag);
edgetpu_vma_private_get(pvt);
client = pvt->client;
etdev = client->etdev;
- evt = vma_type_to_wakelock_event(pvt->type);
+ evt = vma_type_to_wakelock_event(type);
if (evt != EDGETPU_WAKELOCK_EVENT_END)
edgetpu_wakelock_inc_event(client->wakelock, evt);
/* handle telemetry types */
- switch (pvt->type) {
+ switch (type) {
case VMA_LOG:
- edgetpu_telemetry_inc_mmap_count(etdev, EDGETPU_TELEMETRY_LOG);
+ edgetpu_telemetry_inc_mmap_count(etdev, EDGETPU_TELEMETRY_LOG,
+ VMA_DATA_GET(pvt->flag));
break;
case VMA_TRACE:
- edgetpu_telemetry_inc_mmap_count(etdev,
- EDGETPU_TELEMETRY_TRACE);
+ edgetpu_telemetry_inc_mmap_count(etdev, EDGETPU_TELEMETRY_TRACE,
+ VMA_DATA_GET(pvt->flag));
break;
default:
break;
@@ -187,20 +205,22 @@ static void edgetpu_vma_close(struct vm_area_struct *vma)
{
struct edgetpu_vma_private *pvt = vma->vm_private_data;
struct edgetpu_client *client = pvt->client;
- enum edgetpu_wakelock_event evt = vma_type_to_wakelock_event(pvt->type);
+ enum edgetpu_vma_type type = VMA_TYPE(pvt->flag);
+ enum edgetpu_wakelock_event evt = vma_type_to_wakelock_event(type);
struct edgetpu_dev *etdev = client->etdev;
if (evt != EDGETPU_WAKELOCK_EVENT_END)
edgetpu_wakelock_dec_event(client->wakelock, evt);
/* handle telemetry types */
- switch (pvt->type) {
+ switch (type) {
case VMA_LOG:
- edgetpu_telemetry_dec_mmap_count(etdev, EDGETPU_TELEMETRY_LOG);
+ edgetpu_telemetry_dec_mmap_count(etdev, EDGETPU_TELEMETRY_LOG,
+ VMA_DATA_GET(pvt->flag));
break;
case VMA_TRACE:
- edgetpu_telemetry_dec_mmap_count(etdev,
- EDGETPU_TELEMETRY_TRACE);
+ edgetpu_telemetry_dec_mmap_count(etdev, EDGETPU_TELEMETRY_TRACE,
+ VMA_DATA_GET(pvt->flag));
break;
default:
break;
@@ -218,24 +238,26 @@ static const struct vm_operations_struct edgetpu_vma_ops = {
int edgetpu_mmap(struct edgetpu_client *client, struct vm_area_struct *vma)
{
int ret = 0;
+ edgetpu_vma_flags_t flag;
enum edgetpu_vma_type type;
enum edgetpu_wakelock_event evt;
struct edgetpu_vma_private *pvt;
if (vma->vm_start & ~PAGE_MASK) {
etdev_dbg(client->etdev,
- "Base address not page-aligned: 0x%lx\n",
+ "Base address not page-aligned: %#lx\n",
vma->vm_start);
return -EINVAL;
}
- etdev_dbg(client->etdev, "%s: mmap pgoff = %lX\n", __func__,
+ etdev_dbg(client->etdev, "%s: mmap pgoff = %#lX\n", __func__,
vma->vm_pgoff);
- type = mmap_vma_type(vma->vm_pgoff);
+ flag = mmap_vma_flag(vma->vm_pgoff);
+ type = VMA_TYPE(flag);
if (type == VMA_INVALID)
return -EINVAL;
- pvt = edgetpu_vma_private_alloc(client, type);
+ pvt = edgetpu_vma_private_alloc(client, flag);
if (!pvt)
return -ENOMEM;
@@ -259,19 +281,19 @@ int edgetpu_mmap(struct edgetpu_client *client, struct vm_area_struct *vma)
/* Allow mapping log and telemetry buffers without a group */
if (type == VMA_LOG) {
- ret = edgetpu_mmap_telemetry_buffer(client->etdev,
- EDGETPU_TELEMETRY_LOG, vma);
+ ret = edgetpu_mmap_telemetry_buffer(client->etdev, EDGETPU_TELEMETRY_LOG, vma,
+ VMA_DATA_GET(flag));
goto out_set_op;
}
if (type == VMA_TRACE) {
- ret = edgetpu_mmap_telemetry_buffer(
- client->etdev, EDGETPU_TELEMETRY_TRACE, vma);
+ ret = edgetpu_mmap_telemetry_buffer(client->etdev, EDGETPU_TELEMETRY_TRACE, vma,
+ VMA_DATA_GET(flag));
goto out_set_op;
}
evt = vma_type_to_wakelock_event(type);
/*
- * @type should always correspond to a valid event since we handled
+ * VMA_TYPE(@flag) should always correspond to a valid event since we handled
* telemetry mmaps above, still check evt != END in case new types are
* added in the future.
*/
@@ -416,7 +438,7 @@ int edgetpu_device_add(struct edgetpu_dev *etdev,
}
etdev->telemetry =
- devm_kzalloc(etdev->dev, sizeof(*etdev->telemetry), GFP_KERNEL);
+ devm_kcalloc(etdev->dev, etdev->num_cores, sizeof(*etdev->telemetry), GFP_KERNEL);
if (!etdev->telemetry) {
ret = -ENOMEM;
goto remove_usage_stats;
@@ -587,7 +609,8 @@ int edgetpu_alloc_coherent(struct edgetpu_dev *etdev, size_t size,
struct edgetpu_coherent_mem *mem,
enum edgetpu_context_id context_id)
{
- const u32 flags = EDGETPU_MMU_DIE | EDGETPU_MMU_32 | EDGETPU_MMU_HOST;
+ const u32 flags = EDGETPU_MMU_DIE | EDGETPU_MMU_32 | EDGETPU_MMU_HOST |
+ EDGETPU_MMU_COHERENT;
mem->vaddr = dma_alloc_coherent(etdev->dev, size, &mem->dma_addr,
GFP_KERNEL);
diff --git a/drivers/edgetpu/edgetpu-debug-dump.c b/drivers/edgetpu/edgetpu-debug-dump.c
index d8ccde9..3836e85 100644
--- a/drivers/edgetpu/edgetpu-debug-dump.c
+++ b/drivers/edgetpu/edgetpu-debug-dump.c
@@ -9,15 +9,10 @@
#include "edgetpu-config.h"
#include "edgetpu-debug-dump.h"
+#include "edgetpu-device-group.h"
#include "edgetpu-iremap-pool.h"
#include "edgetpu-kci.h"
-static inline u64 word_align_offset(u64 offset)
-{
- return offset/sizeof(u64) +
- (((offset % sizeof(u64)) == 0) ? 0 : 1);
-}
-
int edgetpu_get_debug_dump(struct edgetpu_dev *etdev, u64 type)
{
int ret;
@@ -43,52 +38,50 @@ int edgetpu_get_debug_dump(struct edgetpu_dev *etdev, u64 type)
return ret;
}
+static void edgetpu_reset_debug_dump(struct edgetpu_dev *etdev)
+{
+ memset(etdev->debug_dump_mem.vaddr, 0, etdev->debug_dump_mem.size);
+}
+
static void edgetpu_debug_dump_work(struct work_struct *work)
{
struct edgetpu_dev *etdev;
struct edgetpu_debug_dump_setup *dump_setup;
struct edgetpu_debug_dump *debug_dump;
int ret;
- u64 offset, dump_reason;
+ u64 dump_reason;
etdev = container_of(work, struct edgetpu_dev, debug_dump_work);
dump_setup =
(struct edgetpu_debug_dump_setup *)etdev->debug_dump_mem.vaddr;
- offset = sizeof(struct edgetpu_debug_dump_setup);
- debug_dump = (struct edgetpu_debug_dump *)((u64 *)dump_setup +
- word_align_offset(offset));
+ debug_dump = (struct edgetpu_debug_dump *)(dump_setup + 1);
if (!etdev->debug_dump_handlers) {
- etdev_err(etdev,
- "Failed to generate coredump as handler is NULL");
- goto debug_dump_work_done;
+ etdev_err(etdev, "Failed to generate coredump as handler is NULL");
+ edgetpu_reset_debug_dump(etdev);
+ return;
}
- dump_reason = dump_setup->dump_req_reason;
- if (dump_reason >= DUMP_REQ_REASON_NUM ||
+ dump_reason = debug_dump->dump_reason;
+ if (dump_reason >= DUMP_REASON_NUM ||
!etdev->debug_dump_handlers[dump_reason]) {
etdev_err(etdev,
- "Failed to generate coredump as handler is NULL for dump request reason: 0x%llx",
+ "Failed to generate coredump as handler is NULL for dump request reason: %#llx",
dump_reason);
- goto debug_dump_work_done;
+ edgetpu_reset_debug_dump(etdev);
+ return;
}
- ret = etdev->debug_dump_handlers[dump_reason]
- ((void *)etdev, (void *)dump_setup);
- if (ret) {
+ ret = etdev->debug_dump_handlers[dump_reason]((void *)etdev, (void *)dump_setup);
+ if (ret)
etdev_err(etdev, "Failed to generate coredump: %d\n", ret);
- goto debug_dump_work_done;
- }
-
-debug_dump_work_done:
- debug_dump->host_dump_available_to_read = false;
+ edgetpu_reset_debug_dump(etdev);
}
void edgetpu_debug_dump_resp_handler(struct edgetpu_dev *etdev)
{
struct edgetpu_debug_dump_setup *dump_setup;
struct edgetpu_debug_dump *debug_dump;
- u64 offset;
if (!etdev->debug_dump_mem.vaddr) {
etdev_err(etdev, "Debug dump memory not allocated");
@@ -96,12 +89,12 @@ void edgetpu_debug_dump_resp_handler(struct edgetpu_dev *etdev)
}
dump_setup =
(struct edgetpu_debug_dump_setup *)etdev->debug_dump_mem.vaddr;
- offset = sizeof(struct edgetpu_debug_dump_setup);
- debug_dump = (struct edgetpu_debug_dump *)((u64 *)dump_setup +
- word_align_offset(offset));
+ debug_dump = (struct edgetpu_debug_dump *)(dump_setup + 1);
if (!debug_dump->host_dump_available_to_read)
return;
+ debug_dump->host_dump_available_to_read = false;
+
if (!etdev->debug_dump_work.func)
INIT_WORK(&etdev->debug_dump_work, edgetpu_debug_dump_work);
diff --git a/drivers/edgetpu/edgetpu-debug-dump.h b/drivers/edgetpu/edgetpu-debug-dump.h
index 125ed1a..6a30ef8 100644
--- a/drivers/edgetpu/edgetpu-debug-dump.h
+++ b/drivers/edgetpu/edgetpu-debug-dump.h
@@ -10,7 +10,7 @@
#include "edgetpu-internal.h"
-#define DEBUG_DUMP_HOST_CONTRACT_VERSION 2
+#define DEBUG_DUMP_HOST_CONTRACT_VERSION 3
enum edgetpu_dump_type_bit_position {
DUMP_TYPE_CRASH_REASON_BIT = 0,
@@ -24,11 +24,16 @@ enum edgetpu_dump_type_bit_position {
};
-enum edgetpu_dump_request_reason {
- DUMP_REQ_REASON_DEFAULT = 0,
- DUMP_REQ_REASON_WDT_TIMEOUT = 1,
- DUMP_REQ_REASON_BY_USER = 2,
- DUMP_REQ_REASON_NUM = 3
+enum edgetpu_dump_reason {
+ DUMP_REASON_DEFAULT = 0,
+ /* Host request reasons */
+ DUMP_REASON_REQ_BY_USER = 1,
+
+ /* FW side dump reasons */
+ DUMP_REASON_FW_CHECKPOINT = 2,
+ DUMP_REASON_RECOVERABLE_FAULT = 3,
+
+ DUMP_REASON_NUM = 4
};
struct edgetpu_crash_reason {
@@ -51,6 +56,7 @@ struct edgetpu_debug_dump {
u64 magic; /* word identifying the beginning of the dump info */
u64 version; /* host-firmware dump info contract version */
u64 host_dump_available_to_read; /* is new info available */
+ u64 dump_reason; /* Reason or context for debug dump */
u64 reserved[2];
u64 crash_reason_offset; /* byte offset to crash reason */
u64 crash_reason_size; /* crash reason size */
@@ -64,7 +70,6 @@ struct edgetpu_debug_dump_setup {
/* types of dumps requested by host */
u64 type;
u64 dump_mem_size; /* total size of memory allocated to dump */
- u64 dump_req_reason; /* debug dump request reason */
u64 reserved[2];
};
diff --git a/drivers/edgetpu/edgetpu-device-group.c b/drivers/edgetpu/edgetpu-device-group.c
index 3b8ddf8..6f338be 100644
--- a/drivers/edgetpu/edgetpu-device-group.c
+++ b/drivers/edgetpu/edgetpu-device-group.c
@@ -12,6 +12,7 @@
#include <linux/eventfd.h>
#include <linux/iommu.h>
#include <linux/kconfig.h>
+#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/refcount.h>
@@ -40,10 +41,7 @@
#include "edgetpu-p2p-mailbox.h"
#endif
-#define for_each_list_client(c, group) \
- list_for_each_entry(c, &group->clients, list)
-
-#define for_each_list_client_safe(c, n, group) \
+#define for_each_list_group_client_safe(c, n, group) \
list_for_each_entry_safe(c, n, &group->clients, list)
/* Records the mapping and other fields needed for a host buffer mapping */
@@ -306,20 +304,20 @@ static struct edgetpu_client *edgetpu_device_group_leader(
{
if (group->n_clients < 1 || edgetpu_device_group_is_disbanded(group))
return NULL;
- return list_first_entry(&group->clients, struct edgetpu_list_client,
- list)->client;
+ return list_first_entry(&group->clients,
+ struct edgetpu_list_group_client, list)->client;
}
static int group_alloc_members(struct edgetpu_device_group *group)
{
- struct edgetpu_list_client *c;
+ struct edgetpu_list_group_client *c;
int i = 0;
group->members = kcalloc(group->n_clients, sizeof(*group->members),
GFP_KERNEL);
if (!group->members)
return -ENOMEM;
- for_each_list_client(c, group) {
+ for_each_list_group_client(c, group) {
group->members[i] = c->client;
i++;
}
@@ -602,7 +600,7 @@ void edgetpu_device_group_leave(struct edgetpu_client *client)
{
struct edgetpu_device_group *group;
struct edgetpu_list_group *l;
- struct edgetpu_list_client *cur, *nxt;
+ struct edgetpu_list_group_client *cur, *nxt;
bool will_disband = false;
mutex_lock(&client->group_lock);
@@ -628,8 +626,8 @@ void edgetpu_device_group_leave(struct edgetpu_client *client)
/* release the group before removing any members */
edgetpu_device_group_release(group);
- /* removes the client from the list */
- for_each_list_client_safe(cur, nxt, group) {
+ /* removes the client from the group list */
+ for_each_list_group_client_safe(cur, nxt, group) {
if (cur->client == client) {
list_del(&cur->list);
kfree(cur);
@@ -732,7 +730,7 @@ error:
int edgetpu_device_group_add(struct edgetpu_device_group *group,
struct edgetpu_client *client)
{
- struct edgetpu_list_client *c;
+ struct edgetpu_list_group_client *c;
int ret = 0;
mutex_lock(&client->group_lock);
@@ -747,7 +745,7 @@ int edgetpu_device_group_add(struct edgetpu_device_group *group,
goto out;
}
- for_each_list_client(c, group) {
+ for_each_list_group_client(c, group) {
if (!edgetpu_clients_groupable(c->client, client)) {
ret = -EINVAL;
goto out;
@@ -936,6 +934,7 @@ static int edgetpu_map_iova_sgt_worker(struct iova_mapping_worker_param *param)
edgetpu_mmu_reserve(etdev, map->alloc_iova, map->alloc_size);
ret = edgetpu_mmu_map_iova_sgt(etdev, map->device_address,
&hmap->sg_tables[i], map->dir,
+ map_to_mmu_flags(map->flags),
ctx_id);
if (ret)
edgetpu_mmu_free(etdev, map->alloc_iova, map->alloc_size);
@@ -1057,7 +1056,7 @@ static void edgetpu_unmap_node(struct edgetpu_mapping *map)
struct sg_page_iter sg_iter;
uint i;
- etdev_dbg(group->etdev, "%s: %u: die=%d, iova=0x%llx", __func__,
+ etdev_dbg(group->etdev, "%s: %u: die=%d, iova=%#llx", __func__,
group->workload_id, map->die_index, map->device_address);
if (map->device_address) {
@@ -1106,9 +1105,9 @@ static void edgetpu_host_map_show(struct edgetpu_mapping *map,
seq_puts(s, " mirrored: ");
else
seq_printf(s, " die %u: ", map->die_index);
- seq_printf(s, "0x%llx %lu %s 0x%llx %pap %pad\n",
+ seq_printf(s, "%#llx %lu %s %#llx %pap %pad\n",
map->device_address + cur_offset,
- sg_dma_len(sg) / PAGE_SIZE,
+ DIV_ROUND_UP(sg_dma_len(sg), PAGE_SIZE),
edgetpu_dma_dir_rw_s(map->dir),
map->host_address + cur_offset, &phys_addr,
&dma_addr);
@@ -1116,6 +1115,12 @@ static void edgetpu_host_map_show(struct edgetpu_mapping *map,
}
}
+size_t edgetpu_group_mappings_total_size(struct edgetpu_device_group *group)
+{
+ return edgetpu_mappings_total_size(&group->host_mappings) +
+ edgetpu_mappings_total_size(&group->dmabuf_mappings);
+}
+
/*
* Pins the user-space address @arg->host_address and returns the pinned pages.
* @pnum_pages is set to the number of pages.
@@ -1128,7 +1133,6 @@ static struct page **edgetpu_pin_user_pages(struct edgetpu_device_group *group,
{
u64 host_addr = untagged_addr(arg->host_address);
u64 size = arg->size;
- const enum dma_data_direction dir = arg->flags & EDGETPU_MAP_DIR_MASK;
uint num_pages;
ulong offset;
struct edgetpu_dev *etdev = group->etdev;
@@ -1145,12 +1149,8 @@ static struct page **edgetpu_pin_user_pages(struct edgetpu_device_group *group,
/* overflow check */
if (unlikely((size + offset) / PAGE_SIZE >= UINT_MAX - 1 || size + offset < size))
return ERR_PTR(-ENOMEM);
- num_pages = (size + offset) / PAGE_SIZE;
- if ((size + offset) % PAGE_SIZE)
- num_pages++;
-
- etdev_dbg(etdev, "%s: hostaddr=0x%llx pages=%u dir=%x", __func__,
- host_addr, num_pages, dir);
+ num_pages = DIV_ROUND_UP((size + offset), PAGE_SIZE);
+ etdev_dbg(etdev, "%s: hostaddr=%#llx pages=%u", __func__, host_addr, num_pages);
/*
* "num_pages" is decided from user-space arguments, don't show warnings
* when facing malicious input.
@@ -1247,7 +1247,6 @@ alloc_mapping_from_useraddr(struct edgetpu_device_group *group, u64 host_addr,
{
struct edgetpu_dev *etdev = group->etdev;
struct edgetpu_host_map *hmap;
- const enum dma_data_direction dir = flags & EDGETPU_MAP_DIR_MASK;
int n;
struct sg_table *sgt;
int i;
@@ -1260,7 +1259,7 @@ alloc_mapping_from_useraddr(struct edgetpu_device_group *group, u64 host_addr,
}
hmap->map.host_address = host_addr;
- hmap->map.dir = dir;
+ hmap->map.dir = map_flag_to_host_dma_dir(flags);
hmap->map.priv = edgetpu_device_group_get(group);
hmap->map.release = edgetpu_unmap_node;
hmap->map.show = edgetpu_host_map_show;
@@ -1477,7 +1476,7 @@ int edgetpu_device_group_map(struct edgetpu_device_group *group,
ret = edgetpu_device_group_map_iova_sgt(group, hmap);
if (ret) {
etdev_dbg(etdev,
- "group add translation failed %u:0x%llx",
+ "group add translation failed %u:%#llx",
group->workload_id, map->device_address);
goto error;
}
@@ -1489,6 +1488,7 @@ int edgetpu_device_group_map(struct edgetpu_device_group *group,
goto error;
}
+ map->map_size = arg->size;
/*
* @map can be freed (by another thread) once it's added to the mappings, record the address
* before that.
@@ -1496,7 +1496,7 @@ int edgetpu_device_group_map(struct edgetpu_device_group *group,
tpu_addr = map->device_address;
ret = edgetpu_mapping_add(&group->host_mappings, map);
if (ret) {
- etdev_dbg(etdev, "duplicate mapping %u:0x%llx", group->workload_id, tpu_addr);
+ etdev_dbg(etdev, "duplicate mapping %u:%#llx", group->workload_id, tpu_addr);
goto error;
}
@@ -1543,7 +1543,7 @@ int edgetpu_device_group_unmap(struct edgetpu_device_group *group,
if (!map) {
edgetpu_mapping_unlock(&group->host_mappings);
etdev_dbg(group->etdev,
- "%s: mapping not found for workload %u: 0x%llx",
+ "%s: mapping not found for workload %u: %#llx",
__func__, group->workload_id, tpu_addr);
ret = -EINVAL;
goto unlock_group;
@@ -1564,6 +1564,10 @@ int edgetpu_device_group_sync_buffer(struct edgetpu_device_group *group,
struct edgetpu_mapping *map;
int ret = 0;
tpu_addr_t tpu_addr = arg->device_address;
+ /*
+	 * Sync operations don't care about the data correctness of prefetches by the TPU CPU
+	 * if they only sync FROM_DEVICE, so @dir here doesn't need to be wrapped with host_dma_dir().
+ */
enum dma_data_direction dir = arg->flags & EDGETPU_MAP_DIR_MASK;
struct edgetpu_host_map *hmap;
@@ -1609,7 +1613,10 @@ void edgetpu_mappings_clear_group(struct edgetpu_device_group *group)
void edgetpu_group_mappings_show(struct edgetpu_device_group *group,
struct seq_file *s)
{
- seq_printf(s, "workload %u", group->workload_id);
+ enum edgetpu_context_id context =
+ edgetpu_group_context_id_locked(group);
+
+ seq_printf(s, "group %u", group->workload_id);
switch (group->status) {
case EDGETPU_DEVICE_GROUP_WAITING:
case EDGETPU_DEVICE_GROUP_FINALIZED:
@@ -1621,27 +1628,38 @@ void edgetpu_group_mappings_show(struct edgetpu_device_group *group,
seq_puts(s, ": disbanded\n");
return;
}
- seq_printf(s, " context %d:\n", edgetpu_group_context_id_locked(group));
+
+ if (context == EDGETPU_CONTEXT_INVALID)
+ seq_puts(s, " context (none):\n");
+ else if (context & EDGETPU_CONTEXT_DOMAIN_TOKEN)
+ seq_printf(s, " context detached %#x:\n",
+ context & ~(EDGETPU_CONTEXT_DOMAIN_TOKEN));
+ else
+ seq_printf(s, " context mbox %d:\n", context);
if (group->host_mappings.count) {
- seq_puts(s, "host buffer mappings:\n");
+ seq_printf(s, "host buffer mappings (%zd):\n",
+ group->host_mappings.count);
edgetpu_mappings_show(&group->host_mappings, s);
}
if (group->dmabuf_mappings.count) {
- seq_puts(s, "dma-buf buffer mappings:\n");
+ seq_printf(s, "dma-buf buffer mappings (%zd):\n",
+ group->dmabuf_mappings.count);
edgetpu_mappings_show(&group->dmabuf_mappings, s);
}
if (group->vii.cmd_queue_mem.vaddr) {
seq_puts(s, "VII queues:\n");
- seq_printf(s, " 0x%llx %lu cmdq 0x%llx %pad\n",
+ seq_printf(s, " %#llx %lu cmdq %#llx %pad\n",
group->vii.cmd_queue_mem.tpu_addr,
- group->vii.cmd_queue_mem.size / PAGE_SIZE,
+ DIV_ROUND_UP(group->vii.cmd_queue_mem.size,
+ PAGE_SIZE),
group->vii.cmd_queue_mem.host_addr,
&group->vii.cmd_queue_mem.dma_addr);
- seq_printf(s, " 0x%llx %lu rspq 0x%llx %pad\n",
+ seq_printf(s, " %#llx %lu rspq %#llx %pad\n",
group->vii.resp_queue_mem.tpu_addr,
- group->vii.resp_queue_mem.size / PAGE_SIZE,
+ DIV_ROUND_UP(group->vii.resp_queue_mem.size,
+ PAGE_SIZE),
group->vii.resp_queue_mem.host_addr,
&group->vii.resp_queue_mem.dma_addr);
}
@@ -1719,7 +1737,7 @@ out:
void edgetpu_group_fatal_error_notify(struct edgetpu_device_group *group,
uint error_mask)
{
- etdev_dbg(group->etdev, "notify group %u error 0x%x",
+ etdev_dbg(group->etdev, "notify group %u error %#x",
group->workload_id, error_mask);
mutex_lock(&group->lock);
/*
diff --git a/drivers/edgetpu/edgetpu-device-group.h b/drivers/edgetpu/edgetpu-device-group.h
index 7b20dd0..5c5eaaf 100644
--- a/drivers/edgetpu/edgetpu-device-group.h
+++ b/drivers/edgetpu/edgetpu-device-group.h
@@ -24,7 +24,7 @@
#include "edgetpu.h"
/* entry of edgetpu_device_group#clients */
-struct edgetpu_list_client {
+struct edgetpu_list_group_client {
struct list_head list;
struct edgetpu_client *client;
};
@@ -157,6 +157,11 @@ struct edgetpu_list_group {
for (l = list_entry(etdev->groups.next, typeof(*l), list), g = l->grp; \
&l->list != &etdev->groups; \
l = list_entry(l->list.next, typeof(*l), list), g = l->grp)
+
+/* Loop through group->clients (the caller must hold group->lock). */
+#define for_each_list_group_client(c, group) \
+ list_for_each_entry(c, &group->clients, list)
+
/*
* Returns if the group is waiting for members to join.
*
@@ -330,6 +335,9 @@ int edgetpu_device_group_sync_buffer(struct edgetpu_device_group *group,
/* Clear all mappings for a device group. */
void edgetpu_mappings_clear_group(struct edgetpu_device_group *group);
+/* Return total size of all mappings for the group in bytes */
+size_t edgetpu_group_mappings_total_size(struct edgetpu_device_group *group);
+
/*
* Return context ID for group MMU mappings.
*
diff --git a/drivers/edgetpu/edgetpu-dmabuf.c b/drivers/edgetpu/edgetpu-dmabuf.c
index 1c89178..f98aafe 100644
--- a/drivers/edgetpu/edgetpu-dmabuf.c
+++ b/drivers/edgetpu/edgetpu-dmabuf.c
@@ -10,6 +10,7 @@
#include <linux/dma-direction.h>
#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/seq_file.h>
@@ -91,10 +92,11 @@ static const struct dma_fence_ops edgetpu_dma_fence_ops;
static int etdev_add_translations(struct edgetpu_dev *etdev,
tpu_addr_t tpu_addr,
struct dmabuf_map_entry *entry,
+ u32 mmu_flags,
enum dma_data_direction dir,
enum edgetpu_context_id ctx_id)
{
- const int prot = __dma_dir_to_iommu_prot(dir, etdev->dev);
+ int prot = mmu_flag_to_iommu_prot(mmu_flags, etdev->dev, dir);
uint i;
u64 offset = 0;
int ret;
@@ -145,7 +147,7 @@ static void etdev_remove_translations(struct edgetpu_dev *etdev,
*/
static int etdev_map_dmabuf(struct edgetpu_dev *etdev,
struct edgetpu_dmabuf_map *dmap,
- enum dma_data_direction dir, tpu_addr_t *tpu_addr_p)
+ tpu_addr_t *tpu_addr_p)
{
struct edgetpu_device_group *group = dmap->map.priv;
const enum edgetpu_context_id ctx_id =
@@ -164,7 +166,7 @@ static int etdev_map_dmabuf(struct edgetpu_dev *etdev,
return -ENOSPC;
} else {
tpu_addr =
- edgetpu_mmu_tpu_map_sgt(etdev, &entry->shrunk_sgt, dir,
+ edgetpu_mmu_tpu_map_sgt(etdev, &entry->shrunk_sgt, dmap->map.dir,
ctx_id, dmap->mmu_flags);
if (!tpu_addr)
return -ENOSPC;
@@ -202,7 +204,7 @@ static void etdev_unmap_dmabuf(struct edgetpu_dev *etdev,
*/
static int group_map_dmabuf(struct edgetpu_device_group *group,
struct edgetpu_dmabuf_map *dmap,
- enum dma_data_direction dir, tpu_addr_t *tpu_addr_p)
+ tpu_addr_t *tpu_addr_p)
{
const enum edgetpu_context_id ctx_id =
edgetpu_group_context_id_locked(group);
@@ -211,7 +213,7 @@ static int group_map_dmabuf(struct edgetpu_device_group *group,
uint i;
int ret;
- ret = etdev_map_dmabuf(etdev, dmap, dir, &tpu_addr);
+ ret = etdev_map_dmabuf(etdev, dmap, &tpu_addr);
if (ret)
return ret;
for (i = 1; i < group->n_clients; i++) {
@@ -221,7 +223,7 @@ static int group_map_dmabuf(struct edgetpu_device_group *group,
continue;
}
ret = etdev_add_translations(etdev, tpu_addr, &dmap->entries[i],
- dir, ctx_id);
+ dmap->mmu_flags, dmap->map.dir, ctx_id);
if (ret)
goto err_remove;
}
@@ -276,7 +278,7 @@ static void dmabuf_map_callback_release(struct edgetpu_mapping *map)
struct edgetpu_dmabuf_map *dmap =
container_of(map, struct edgetpu_dmabuf_map, map);
struct edgetpu_device_group *group = map->priv;
- const enum dma_data_direction dir = edgetpu_host_dma_dir(map->dir);
+ const enum dma_data_direction dir = map->dir;
tpu_addr_t tpu_addr = map->device_address;
struct edgetpu_dev *etdev;
uint i;
@@ -336,13 +338,15 @@ static void dmabuf_map_callback_show(struct edgetpu_mapping *map,
container_of(map, struct edgetpu_dmabuf_map, map);
if (IS_MIRRORED(dmap->map.flags))
- seq_printf(s, " <%s> mirrored: iova=0x%llx pages=%llu %s",
- dmap->dmabufs[0]->exp_name, map->device_address, dmap->size / PAGE_SIZE,
+ seq_printf(s, " <%s> mirrored: iova=%#llx pages=%llu %s",
+ dmap->dmabufs[0]->exp_name, map->device_address,
+ DIV_ROUND_UP(dmap->size, PAGE_SIZE),
edgetpu_dma_dir_rw_s(map->dir));
else
- seq_printf(s, " <%s> die %u: iova=0x%llx pages=%llu %s",
+ seq_printf(s, " <%s> die %u: iova=%#llx pages=%llu %s",
dmap->dmabufs[0]->exp_name, map->die_index, map->device_address,
- dmap->size / PAGE_SIZE, edgetpu_dma_dir_rw_s(map->dir));
+ DIV_ROUND_UP(dmap->size, PAGE_SIZE),
+ edgetpu_dma_dir_rw_s(map->dir));
edgetpu_device_dram_dmabuf_info_show(dmap->dmabufs[0], s);
seq_puts(s, " dma=");
@@ -403,7 +407,7 @@ static void dmabuf_bulk_map_callback_release(struct edgetpu_mapping *map)
struct edgetpu_dmabuf_map *bmap =
container_of(map, struct edgetpu_dmabuf_map, map);
struct edgetpu_device_group *group = map->priv;
- const enum dma_data_direction dir = edgetpu_host_dma_dir(map->dir);
+ const enum dma_data_direction dir = map->dir;
const tpu_addr_t tpu_addr = map->device_address;
int i;
@@ -414,8 +418,7 @@ static void dmabuf_bulk_map_callback_release(struct edgetpu_mapping *map)
sg_free_table(&entry->shrunk_sgt);
if (entry->sgt)
- dma_buf_unmap_attachment(entry->attachment, entry->sgt,
- dir);
+ dma_buf_unmap_attachment(entry->attachment, entry->sgt, dir);
if (entry->attachment)
dma_buf_detach(bmap->dmabufs[i], entry->attachment);
if (bmap->dmabufs[i])
@@ -434,8 +437,8 @@ static void dmabuf_bulk_map_callback_show(struct edgetpu_mapping *map,
container_of(map, struct edgetpu_dmabuf_map, map);
int i;
- seq_printf(s, " bulk: iova=0x%llx pages=%llu %s\n",
- map->device_address, bmap->size / PAGE_SIZE,
+ seq_printf(s, " bulk: iova=%#llx pages=%llu %s\n",
+ map->device_address, DIV_ROUND_UP(bmap->size, PAGE_SIZE),
edgetpu_dma_dir_rw_s(map->dir));
for (i = 0; i < bmap->num_entries; i++) {
if (!bmap->dmabufs[i]) {
@@ -633,8 +636,7 @@ int edgetpu_map_dmabuf(struct edgetpu_device_group *group,
struct dma_buf *dmabuf;
edgetpu_map_flag_t flags = arg->flags;
u64 size;
- const enum dma_data_direction dir =
- edgetpu_host_dma_dir(flags & EDGETPU_MAP_DIR_MASK);
+ const enum dma_data_direction dir = map_flag_to_host_dma_dir(flags);
struct edgetpu_dev *etdev;
struct edgetpu_dmabuf_map *dmap;
tpu_addr_t tpu_addr;
@@ -668,7 +670,7 @@ int edgetpu_map_dmabuf(struct edgetpu_device_group *group,
get_dma_buf(dmabuf);
dmap->dmabufs[0] = dmabuf;
- dmap->size = size = dmabuf->size;
+ dmap->map.map_size = dmap->size = size = dmabuf->size;
if (IS_MIRRORED(flags)) {
for (i = 0; i < group->n_clients; i++) {
etdev = edgetpu_device_group_nth_etdev(group, i);
@@ -681,7 +683,7 @@ int edgetpu_map_dmabuf(struct edgetpu_device_group *group,
goto err_release_map;
}
}
- ret = group_map_dmabuf(group, dmap, dir, &tpu_addr);
+ ret = group_map_dmabuf(group, dmap, &tpu_addr);
if (ret) {
etdev_dbg(group->etdev,
"%s: group_map_dmabuf returns %d\n",
@@ -705,7 +707,7 @@ int edgetpu_map_dmabuf(struct edgetpu_device_group *group,
__func__, ret);
goto err_release_map;
}
- ret = etdev_map_dmabuf(etdev, dmap, dir, &tpu_addr);
+ ret = etdev_map_dmabuf(etdev, dmap, &tpu_addr);
if (ret) {
etdev_dbg(group->etdev,
"%s: etdev_map_dmabuf returns %d\n",
@@ -771,8 +773,7 @@ out_unlock:
int edgetpu_map_bulk_dmabuf(struct edgetpu_device_group *group,
struct edgetpu_map_bulk_dmabuf_ioctl *arg)
{
- const enum dma_data_direction dir =
- edgetpu_host_dma_dir(arg->flags & EDGETPU_MAP_DIR_MASK);
+ const enum dma_data_direction dir = map_flag_to_host_dma_dir(arg->flags);
int ret = -EINVAL;
struct edgetpu_dmabuf_map *bmap;
struct dma_buf *dmabuf;
@@ -820,7 +821,7 @@ int edgetpu_map_bulk_dmabuf(struct edgetpu_device_group *group,
if (ret)
goto err_release_bmap;
}
- ret = group_map_dmabuf(group, bmap, dir, &tpu_addr);
+ ret = group_map_dmabuf(group, bmap, &tpu_addr);
if (ret)
goto err_release_bmap;
bmap->map.device_address = tpu_addr;
diff --git a/drivers/edgetpu/edgetpu-external.c b/drivers/edgetpu/edgetpu-external.c
new file mode 100644
index 0000000..1552774
--- /dev/null
+++ b/drivers/edgetpu/edgetpu-external.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Utility functions for interfacing other modules with Edge TPU ML accelerator.
+ *
+ * Copyright (C) 2021 Google, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+
+#include <soc/google/tpu-ext.h>
+
+#include "edgetpu-config.h"
+#include "edgetpu-device-group.h"
+#include "edgetpu-internal.h"
+#include "edgetpu-mailbox.h"
+
+static enum edgetpu_ext_mailbox_type
+edgetpu_external_client_to_mailbox_type(enum edgetpu_ext_client_type client_type)
+{
+ switch (client_type) {
+ case EDGETPU_EXTERNAL_CLIENT_TYPE_DSP:
+ return EDGETPU_EXTERNAL_MAILBOX_TYPE_DSP;
+ case EDGETPU_EXTERNAL_CLIENT_TYPE_AOC:
+ return EDGETPU_EXTERNAL_MAILBOX_TYPE_AOC;
+ default:
+ return -ENOENT;
+ }
+}
+
+static int edgetpu_external_mailbox_info_get(struct edgetpu_ext_mailbox_info *info,
+ struct edgetpu_external_mailbox *ext_mailbox)
+{
+ int i;
+ u32 count = ext_mailbox->count;
+ struct edgetpu_mailbox_descriptor *desc;
+
+ if (!info)
+ return -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ desc = &ext_mailbox->descriptors[i];
+ info->mailboxes[i].cmdq_pa = desc->cmd_queue_mem.phys_addr;
+ info->mailboxes[i].respq_pa = desc->resp_queue_mem.phys_addr;
+ }
+
+ info->cmdq_size = ext_mailbox->attr.cmd_queue_size;
+ info->respq_size = ext_mailbox->attr.resp_queue_size;
+
+ return 0;
+}
+
+static int edgetpu_external_mailbox_alloc(struct device *edgetpu_dev,
+ struct edgetpu_ext_client_info *client_info,
+ struct edgetpu_ext_mailbox_info *info,
+ enum edgetpu_ext_client_type client_type)
+{
+ struct edgetpu_client *client;
+ struct edgetpu_device_group *group;
+ struct edgetpu_external_mailbox *ext_mailbox;
+ struct edgetpu_external_mailbox_req req;
+ int ret = 0;
+ struct fd f = fdget(client_info->tpu_fd);
+ struct file *file = f.file;
+
+ if (!file)
+ return -EBADF;
+
+ if (!is_edgetpu_file(file)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ client = file->private_data;
+ if (!client || client->etdev->dev != edgetpu_dev) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ req.mbox_type = edgetpu_external_client_to_mailbox_type(client_type);
+ req.mbox_map = client_info->mbox_map;
+
+ ret = edgetpu_chip_get_ext_mailbox_index(req.mbox_type, &req.start, &req.end);
+ if (ret)
+ goto out;
+
+ mutex_lock(&client->group_lock);
+ if (!client->group) {
+ ret = -EINVAL;
+ mutex_unlock(&client->group_lock);
+ goto out;
+ }
+ group = edgetpu_device_group_get(client->group);
+ mutex_unlock(&client->group_lock);
+
+ if (copy_from_user(&req.attr, (void __user *)client_info->attr, sizeof(req.attr))) {
+ if (!client_info->attr)
+ etdev_warn(client->etdev,
+ "Illegal mailbox attributes, using VII mailbox attrs\n");
+ req.attr = group->mbox_attr;
+ }
+
+ ret = edgetpu_mailbox_enable_ext(client, EDGETPU_MAILBOX_ID_USE_ASSOC, &req);
+ if (ret) {
+ edgetpu_device_group_put(group);
+ goto out;
+ }
+ mutex_lock(&group->lock);
+ ext_mailbox = group->ext_mailbox;
+ if (!ext_mailbox) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+ ret = edgetpu_external_mailbox_info_get(info, ext_mailbox);
+unlock:
+ mutex_unlock(&group->lock);
+ edgetpu_device_group_put(group);
+out:
+ fdput(f);
+ return ret;
+}
+
+static int edgetpu_external_mailbox_free(struct device *edgetpu_dev,
+ struct edgetpu_ext_client_info *client_info)
+{
+ struct edgetpu_client *client;
+ int ret = 0;
+ struct fd f = fdget(client_info->tpu_fd);
+ struct file *file = f.file;
+
+ if (!file)
+ return -EBADF;
+
+ if (!is_edgetpu_file(file)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ client = file->private_data;
+ if (!client || client->etdev->dev != edgetpu_dev) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = edgetpu_mailbox_disable_ext(client, EDGETPU_MAILBOX_ID_USE_ASSOC);
+out:
+ fdput(f);
+ return ret;
+}
+
+int edgetpu_ext_driver_cmd(struct device *edgetpu_dev,
+ enum edgetpu_ext_client_type client_type,
+ enum edgetpu_ext_commands cmd_id, void *in_data, void *out_data)
+{
+ switch (cmd_id) {
+ case ALLOCATE_EXTERNAL_MAILBOX:
+ return edgetpu_external_mailbox_alloc(edgetpu_dev, in_data, out_data, client_type);
+ case FREE_EXTERNAL_MAILBOX:
+ return edgetpu_external_mailbox_free(edgetpu_dev, in_data);
+ default:
+ return -ENOENT;
+ }
+}
+EXPORT_SYMBOL_GPL(edgetpu_ext_driver_cmd);
diff --git a/drivers/edgetpu/edgetpu-firmware.c b/drivers/edgetpu/edgetpu-firmware.c
index d23d00c..31f0f4c 100644
--- a/drivers/edgetpu/edgetpu-firmware.c
+++ b/drivers/edgetpu/edgetpu-firmware.c
@@ -22,33 +22,12 @@
#include "edgetpu-internal.h"
#include "edgetpu-kci.h"
#include "edgetpu-pm.h"
-#include "edgetpu-shared-fw.h"
#include "edgetpu-sw-watchdog.h"
#include "edgetpu-telemetry.h"
static char *firmware_name;
module_param(firmware_name, charp, 0660);
-/*
- * Descriptor for loaded firmware, either in shared buffer mode or legacy mode
- * (non-shared, custom allocated memory).
- */
-struct edgetpu_firmware_desc {
- /*
- * Mode independent buffer information. This is either passed into or
- * updated by handlers.
- */
- struct edgetpu_firmware_buffer buf;
- /*
- * Shared firmware buffer when we're using shared buffer mode. This
- * pointer to keep and release the reference count on unloading this
- * shared firmware buffer.
- *
- * This is NULL when firmware is loaded in legacy mode.
- */
- struct edgetpu_shared_fw_buffer *shared_buf;
-};
-
struct edgetpu_firmware_private {
const struct edgetpu_firmware_chip_data *chip_fw;
void *data; /* for edgetpu_firmware_(set/get)_data */
@@ -70,107 +49,6 @@ void *edgetpu_firmware_get_data(struct edgetpu_firmware *et_fw)
return et_fw->p->data;
}
-static int edgetpu_firmware_legacy_load_locked(
- struct edgetpu_firmware *et_fw,
- struct edgetpu_firmware_desc *fw_desc, const char *name)
-{
- int ret;
- struct edgetpu_dev *etdev = et_fw->etdev;
- struct device *dev = etdev->dev;
- const struct firmware *fw;
- size_t aligned_size;
-
- ret = request_firmware(&fw, name, dev);
- if (ret) {
- etdev_dbg(etdev,
- "%s: request '%s' failed: %d\n", __func__, name, ret);
- return ret;
- }
-
- aligned_size = ALIGN(fw->size, fw_desc->buf.used_size_align);
- if (aligned_size > fw_desc->buf.alloc_size) {
- etdev_dbg(etdev,
- "%s: firmware buffer too small: alloc size=0x%zx, required size=0x%zx\n",
- __func__, fw_desc->buf.alloc_size, aligned_size);
- ret = -ENOSPC;
- goto out_release_firmware;
- }
-
- memcpy(fw_desc->buf.vaddr, fw->data, fw->size);
- fw_desc->buf.used_size = aligned_size;
- fw_desc->buf.name = kstrdup(name, GFP_KERNEL);
-
-out_release_firmware:
- release_firmware(fw);
- return ret;
-}
-
-static void edgetpu_firmware_legacy_unload_locked(
- struct edgetpu_firmware *et_fw,
- struct edgetpu_firmware_desc *fw_desc)
-{
- kfree(fw_desc->buf.name);
- fw_desc->buf.name = NULL;
- fw_desc->buf.used_size = 0;
-}
-
-static int edgetpu_firmware_shared_load_locked(
- struct edgetpu_firmware *et_fw,
- struct edgetpu_firmware_desc *fw_desc, const char *name)
-{
- int ret;
- struct edgetpu_dev *etdev = et_fw->etdev;
- struct edgetpu_shared_fw_buffer *shared_buf;
-
- shared_buf = edgetpu_shared_fw_load(name, etdev);
- if (IS_ERR(shared_buf)) {
- ret = PTR_ERR(shared_buf);
- etdev_dbg(etdev, "shared buffer loading failed: %d\n", ret);
- return ret;
- }
- fw_desc->shared_buf = shared_buf;
- fw_desc->buf.vaddr = edgetpu_shared_fw_buffer_vaddr(shared_buf);
- fw_desc->buf.alloc_size = edgetpu_shared_fw_buffer_size(shared_buf);
- fw_desc->buf.used_size = fw_desc->buf.alloc_size;
- fw_desc->buf.name = edgetpu_shared_fw_buffer_name(shared_buf);
- return 0;
-}
-
-static void edgetpu_firmware_shared_unload_locked(
- struct edgetpu_firmware *et_fw,
- struct edgetpu_firmware_desc *fw_desc)
-{
- fw_desc->buf.vaddr = NULL;
- fw_desc->buf.alloc_size = 0;
- fw_desc->buf.used_size = 0;
- fw_desc->buf.name = NULL;
- edgetpu_shared_fw_put(fw_desc->shared_buf);
- fw_desc->shared_buf = NULL;
-}
-
-static int edgetpu_firmware_do_load_locked(
- struct edgetpu_firmware *et_fw,
- struct edgetpu_firmware_desc *fw_desc, const char *name)
-{
- /* Use shared firmware from host if not allocated a buffer space. */
- if (!fw_desc->buf.vaddr)
- return edgetpu_firmware_shared_load_locked(et_fw, fw_desc,
- name);
- else
- return edgetpu_firmware_legacy_load_locked(et_fw, fw_desc,
- name);
-}
-
-static void edgetpu_firmware_do_unload_locked(
- struct edgetpu_firmware *et_fw,
- struct edgetpu_firmware_desc *fw_desc)
-{
- if (fw_desc->shared_buf)
- edgetpu_firmware_shared_unload_locked(et_fw, fw_desc);
- else
- edgetpu_firmware_legacy_unload_locked(et_fw, fw_desc);
-}
-
static int edgetpu_firmware_load_locked(
struct edgetpu_firmware *et_fw,
struct edgetpu_firmware_desc *fw_desc, const char *name,
@@ -191,7 +69,7 @@ static int edgetpu_firmware_load_locked(
}
}
- ret = edgetpu_firmware_do_load_locked(et_fw, fw_desc, name);
+ ret = edgetpu_firmware_chip_load_locked(et_fw, fw_desc, name);
if (ret) {
etdev_err(etdev, "firmware request failed: %d\n", ret);
goto out_free_buffer;
@@ -202,14 +80,14 @@ static int edgetpu_firmware_load_locked(
if (ret) {
etdev_err(etdev, "handler setup_buffer failed: %d\n",
ret);
- goto out_do_unload_locked;
+ goto out_unload_locked;
}
}
return 0;
-out_do_unload_locked:
- edgetpu_firmware_do_unload_locked(et_fw, fw_desc);
+out_unload_locked:
+ edgetpu_firmware_chip_unload_locked(et_fw, fw_desc);
out_free_buffer:
if (chip_fw->free_buffer)
chip_fw->free_buffer(et_fw, &fw_desc->buf);
@@ -227,7 +105,7 @@ static void edgetpu_firmware_unload_locked(
*/
if (chip_fw->teardown_buffer)
chip_fw->teardown_buffer(et_fw, &fw_desc->buf);
- edgetpu_firmware_do_unload_locked(et_fw, fw_desc);
+ edgetpu_firmware_chip_unload_locked(et_fw, fw_desc);
/*
* Platform specific implementation for freeing allocated buffer.
*/
@@ -463,7 +341,7 @@ int edgetpu_firmware_run_locked(struct edgetpu_firmware *et_fw,
if (ret)
goto out_failed;
- etdev_dbg(et_fw->etdev, "run fw %s flags=0x%x", name, flags);
+ etdev_dbg(et_fw->etdev, "run fw %s flags=%#x", name, flags);
if (chip_fw->prepare_run) {
/* Note this may recursively call us to run BL1 */
ret = chip_fw->prepare_run(et_fw, &new_fw_desc.buf);
@@ -867,7 +745,7 @@ void edgetpu_firmware_mappings_show(struct edgetpu_dev *etdev,
return;
fw_iova_target = fw_buf->dram_tpa ? fw_buf->dram_tpa : fw_buf->dma_addr;
iova = edgetpu_chip_firmware_iova(etdev);
- seq_printf(s, " 0x%lx %lu fw - %pad %s\n", iova,
- fw_buf->alloc_size / PAGE_SIZE, &fw_iova_target,
+ seq_printf(s, " %#lx %lu fw - %pad %s\n", iova,
+ DIV_ROUND_UP(fw_buf->alloc_size, PAGE_SIZE), &fw_iova_target,
fw_buf->flags & FW_ONDEV ? "dev" : "");
}
diff --git a/drivers/edgetpu/edgetpu-firmware.h b/drivers/edgetpu/edgetpu-firmware.h
index 0d3e1d4..f24135b 100644
--- a/drivers/edgetpu/edgetpu-firmware.h
+++ b/drivers/edgetpu/edgetpu-firmware.h
@@ -94,6 +94,26 @@ struct edgetpu_firmware_buffer {
const char *name; /* the name of this firmware */
};
+/*
+ * Descriptor for loaded firmware, either in shared buffer mode or carveout mode
+ * (non-shared, custom allocated memory).
+ */
+struct edgetpu_firmware_desc {
+ /*
+ * Mode independent buffer information. This is either passed into or
+ * updated by handlers.
+ */
+ struct edgetpu_firmware_buffer buf;
+ /*
+	 * Shared firmware buffer when we're using shared buffer mode. This
+	 * pointer is used to keep and release the reference count when
+	 * unloading this shared firmware buffer.
+ *
+ * This is NULL when firmware is loaded in carveout mode.
+ */
+ struct edgetpu_shared_fw_buffer *shared_buf;
+};
+
struct edgetpu_firmware_chip_data {
/* Name of default firmware image for this chip. */
const char *default_firmware_name;
@@ -162,6 +182,19 @@ struct edgetpu_firmware_chip_data {
};
/*
+ * Chip-dependent (actually chip family dependent, mobile vs. MCP) calls
+ * for loading/unloading firmware images. These handle chip-specific carveout
+ * buffers vs. shared firmware handling for multi-chip platforms. Used by the
+ * common firmware layer.
+ */
+int edgetpu_firmware_chip_load_locked(
+ struct edgetpu_firmware *et_fw,
+ struct edgetpu_firmware_desc *fw_desc, const char *name);
+void edgetpu_firmware_chip_unload_locked(
+ struct edgetpu_firmware *et_fw,
+ struct edgetpu_firmware_desc *fw_desc);
+
+/*
* Returns the chip-specific IOVA where the firmware is mapped.
*
* Debug purpose only.
@@ -252,4 +285,20 @@ uint32_t edgetpu_firmware_get_cl(struct edgetpu_firmware *et_fw);
/* Returns the build time of the image in seconds since 1970. */
uint64_t edgetpu_firmware_get_build_time(struct edgetpu_firmware *et_fw);
+/*
+ * Kernel-side verification of the firmware signature (if EDGETPU_FEATURE_FW_SIG enabled).
+ *
+ * @etdev: the edgetpu_dev for which the initial load of a (probably
+ * shared) firmware image is requested
+ * @name: name of the image being validated (request_firmware path)
+ * @image_data: passes in the pointer to the raw image with signature, returns
+ * pointer to the firmware code image.
+ * @image_size: passes in the size of the raw image with signature, returns
+ * size of the firmware code image.
+ */
+bool edgetpu_firmware_verify_signature(struct edgetpu_dev *etdev,
+ const char *name,
+ void **image_data, size_t *image_size);
+
+
#endif /* __EDGETPU_FIRMWARE_H__ */
diff --git a/drivers/edgetpu/edgetpu-fs.c b/drivers/edgetpu/edgetpu-fs.c
index 50ead8e..46de18f 100644
--- a/drivers/edgetpu/edgetpu-fs.c
+++ b/drivers/edgetpu/edgetpu-fs.c
@@ -66,7 +66,7 @@ static struct dentry *edgetpu_debugfs_dir;
} \
} while (0)
-static bool is_edgetpu_file(struct file *file)
+bool is_edgetpu_file(struct file *file)
{
if (edgetpu_is_external_wrapper_class_file(file))
return true;
@@ -871,7 +871,7 @@ static void dump_statusregs_ranges(
for (reg = ranges[i].firstreg; reg <= ranges[i].lastreg;
reg += sizeof(val)) {
val = edgetpu_dev_read_64(etdev, reg);
- seq_printf(s, "0x%08x: 0x%016llx\n", reg, val);
+ seq_printf(s, "%#08x: %#016llx\n", reg, val);
}
}
}
@@ -891,15 +891,15 @@ static void dump_mboxes(struct seq_file *s, struct edgetpu_dev *etdev)
for (offset = 0x0; offset <= 0x40; offset += sizeof(val)) {
val = edgetpu_dev_read_32(etdev, base + offset);
- seq_printf(s, "0x%08x: 0x%08x\n", base + offset, val);
+ seq_printf(s, "%#08x: %#08x\n", base + offset, val);
}
for (offset = 0x1000; offset <= 0x1014; offset += sizeof(val)) {
val = edgetpu_dev_read_32(etdev, base + offset);
- seq_printf(s, "0x%08x: 0x%08x\n", base + offset, val);
+ seq_printf(s, "%#08x: %#08x\n", base + offset, val);
}
for (offset = 0x1800; offset <= 0x1818; offset += sizeof(val)) {
val = edgetpu_dev_read_32(etdev, base + offset);
- seq_printf(s, "0x%08x: 0x%08x\n", base + offset, val);
+ seq_printf(s, "%#08x: %#08x\n", base + offset, val);
}
}
}
@@ -1037,10 +1037,98 @@ static ssize_t clients_show(
}
static DEVICE_ATTR_RO(clients);
+static ssize_t show_group(struct edgetpu_dev *etdev,
+ struct edgetpu_device_group *group, char *buf,
+ ssize_t buflen)
+{
+ enum edgetpu_context_id context =
+ edgetpu_group_context_id_locked(group);
+ struct edgetpu_list_group_client *lc;
+ ssize_t len;
+ ssize_t ret = 0;
+
+ len = scnprintf(buf, buflen - ret, "group %u ", group->workload_id);
+ buf += len;
+ ret += len;
+
+ switch (group->status) {
+ case EDGETPU_DEVICE_GROUP_WAITING:
+ len = scnprintf(buf, buflen - ret, "forming ");
+ buf += len;
+ ret += len;
+ break;
+ case EDGETPU_DEVICE_GROUP_FINALIZED:
+ break;
+ case EDGETPU_DEVICE_GROUP_ERRORED:
+ len = scnprintf(buf, buflen - ret, "error %#x ",
+ group->fatal_errors);
+ buf += len;
+ ret += len;
+ break;
+ case EDGETPU_DEVICE_GROUP_DISBANDED:
+ len = scnprintf(buf, buflen - ret, "disbanded\n");
+ ret += len;
+ return ret;
+ }
+
+ if (context == EDGETPU_CONTEXT_INVALID)
+ len = scnprintf(buf, buflen - ret, "context (none) ");
+ else if (context & EDGETPU_CONTEXT_DOMAIN_TOKEN)
+ len = scnprintf(buf, buflen - ret, "context detached %#x ",
+ context & ~(EDGETPU_CONTEXT_DOMAIN_TOKEN));
+ else
+ len = scnprintf(buf, buflen - ret, "context mbox %d ",
+ context);
+ buf += len;
+ ret += len;
+ len = scnprintf(buf, buflen - ret, "vcid %u %s%s\n",
+ group->vcid, group->dev_inaccessible ? "i" : "",
+ group->ext_mailbox ? "x" : "");
+ buf += len;
+ ret += len;
+
+ for_each_list_group_client(lc, group) {
+ len = scnprintf(buf, buflen - ret, "client %s %d:%d\n",
+ lc->client->etiface->name,
+ lc->client->pid, lc->client->tgid);
+ buf += len;
+ ret += len;
+ }
+
+ len = scnprintf(buf, buflen - ret, "mappings %zd %zdB\n",
+ group->host_mappings.count +
+ group->dmabuf_mappings.count,
+ edgetpu_group_mappings_total_size(group));
+ buf += len;
+ ret += len;
+ return ret;
+}
+
+static ssize_t groups_show(
+ struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct edgetpu_dev *etdev = dev_get_drvdata(dev);
+ struct edgetpu_device_group *group;
+ struct edgetpu_list_group *lg;
+ ssize_t ret = 0;
+
+ mutex_lock(&etdev->groups_lock);
+ etdev_for_each_group(etdev, lg, group) {
+ edgetpu_device_group_get(group);
+ ret += show_group(etdev, group, buf + ret, PAGE_SIZE - ret);
+ edgetpu_device_group_put(group);
+ }
+ mutex_unlock(&etdev->groups_lock);
+ return ret;
+}
+static DEVICE_ATTR_RO(groups);
+
static struct attribute *edgetpu_dev_attrs[] = {
&dev_attr_firmware_crash_count.attr,
&dev_attr_watchdog_timeout_count.attr,
&dev_attr_clients.attr,
+ &dev_attr_groups.attr,
NULL,
};
diff --git a/drivers/edgetpu/edgetpu-google-iommu.c b/drivers/edgetpu/edgetpu-google-iommu.c
index 851a326..0bd68b8 100644
--- a/drivers/edgetpu/edgetpu-google-iommu.c
+++ b/drivers/edgetpu/edgetpu-google-iommu.c
@@ -12,8 +12,8 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/version.h>
+#include "edgetpu-config.h"
#include "edgetpu-internal.h"
#include "edgetpu-mapping.h"
#include "edgetpu-mmu.h"
@@ -89,12 +89,6 @@ get_domain_by_context_id(struct edgetpu_dev *etdev,
return domain;
}
-/*
- * Kernel 5.3 introduced iommu_register_device_fault_handler
- */
-
-#if KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE
-
static int edgetpu_iommu_dev_fault_handler(struct iommu_fault *fault,
void *token)
{
@@ -107,8 +101,7 @@ static int edgetpu_iommu_dev_fault_handler(struct iommu_fault *fault,
etdev_warn(etdev, "pasid = %08X\n", fault->event.pasid);
etdev_warn(etdev, "perms = %08X\n", fault->event.perm);
etdev_warn(etdev, "addr = %llX\n", fault->event.addr);
- etdev_warn(etdev, "fetch_addr = %llX\n",
- fault->event.fetch_addr);
+ etdev_warn(etdev, "fetch_addr = %llX\n", fault->event.fetch_addr);
} else if (fault->type == IOMMU_FAULT_PAGE_REQ) {
etdev_dbg(etdev, "IOMMU page request fault!\n");
etdev_dbg(etdev, "flags = %08X\n", fault->prm.flags);
@@ -121,37 +114,19 @@ static int edgetpu_iommu_dev_fault_handler(struct iommu_fault *fault,
return -EAGAIN;
}
-static int
-edgetpu_register_iommu_device_fault_handler(struct edgetpu_dev *etdev)
+static int edgetpu_register_iommu_device_fault_handler(struct edgetpu_dev *etdev)
{
etdev_dbg(etdev, "Registering IOMMU device fault handler\n");
- return iommu_register_device_fault_handler(
- etdev->dev, edgetpu_iommu_dev_fault_handler, etdev);
+ return iommu_register_device_fault_handler(etdev->dev, edgetpu_iommu_dev_fault_handler,
+ etdev);
}
-static int
-edgetpu_unregister_iommu_device_fault_handler(struct edgetpu_dev *etdev)
+static int edgetpu_unregister_iommu_device_fault_handler(struct edgetpu_dev *etdev)
{
etdev_dbg(etdev, "Unregistering IOMMU device fault handler\n");
return iommu_unregister_device_fault_handler(etdev->dev);
}
-#else /* kernel version before 5.3 */
-
-static int
-edgetpu_register_iommu_device_fault_handler(struct edgetpu_dev *etdev)
-{
- return 0;
-}
-
-static int
-edgetpu_unregister_iommu_device_fault_handler(struct edgetpu_dev *etdev)
-{
- return 0;
-}
-
-#endif /* KERNEL_VERSION(5, 3, 0) <= LINUX_VERSION_CODE */
-
/* A callback for idr_for_each to release the domains */
static int edgetpu_idr_free_domain_callback(int id, void *p, void *data)
{
@@ -320,11 +295,11 @@ int edgetpu_mmu_reattach(struct edgetpu_dev *etdev)
static int get_iommu_map_params(struct edgetpu_dev *etdev,
struct edgetpu_mapping *map,
enum edgetpu_context_id context_id,
- struct edgetpu_iommu_map_params *params)
+ struct edgetpu_iommu_map_params *params, u32 mmu_flags)
{
struct edgetpu_iommu *etiommu = etdev->mmu_cookie;
size_t size = 0;
- int prot = __dma_dir_to_iommu_prot(map->dir, etdev->dev);
+ int prot = mmu_flag_to_iommu_prot(mmu_flags, etdev->dev, map->dir);
struct iommu_domain *domain;
int i;
struct scatterlist *sg;
@@ -357,7 +332,7 @@ int edgetpu_mmu_map(struct edgetpu_dev *etdev, struct edgetpu_mapping *map,
struct iommu_domain *default_domain =
iommu_get_domain_for_dev(etdev->dev);
- ret = get_iommu_map_params(etdev, map, context_id, &params);
+ ret = get_iommu_map_params(etdev, map, context_id, &params, mmu_flags);
if (ret)
return ret;
@@ -367,8 +342,7 @@ int edgetpu_mmu_map(struct edgetpu_dev *etdev, struct edgetpu_mapping *map,
"%s: 64-bit addressing is not supported",
__func__);
- ret = dma_map_sg_attrs(etdev->dev, map->sgt.sgl, map->sgt.nents,
- edgetpu_host_dma_dir(map->dir), map->dma_attrs);
+ ret = dma_map_sg_attrs(etdev->dev, map->sgt.sgl, map->sgt.nents, map->dir, map->dma_attrs);
if (!ret)
return -EINVAL;
map->sgt.nents = ret;
@@ -383,9 +357,7 @@ int edgetpu_mmu_map(struct edgetpu_dev *etdev, struct edgetpu_mapping *map,
if (!iommu_map_sg(params.domain, iova, map->sgt.sgl,
map->sgt.orig_nents, params.prot)) {
/* Undo the mapping in the default domain */
- dma_unmap_sg_attrs(etdev->dev, map->sgt.sgl,
- map->sgt.orig_nents,
- edgetpu_host_dma_dir(map->dir),
+ dma_unmap_sg_attrs(etdev->dev, map->sgt.sgl, map->sgt.orig_nents, map->dir,
DMA_ATTR_SKIP_CPU_SYNC);
return -ENOMEM;
}
@@ -403,7 +375,7 @@ void edgetpu_mmu_unmap(struct edgetpu_dev *etdev, struct edgetpu_mapping *map,
struct iommu_domain *default_domain =
iommu_get_domain_for_dev(etdev->dev);
- ret = get_iommu_map_params(etdev, map, context_id, &params);
+ ret = get_iommu_map_params(etdev, map, context_id, &params, 0);
if (!ret && params.domain != default_domain) {
/*
* If this is a per-context mapping, it was mirrored in the
@@ -413,15 +385,15 @@ void edgetpu_mmu_unmap(struct edgetpu_dev *etdev, struct edgetpu_mapping *map,
}
/* Undo the mapping in the default domain */
- dma_unmap_sg_attrs(etdev->dev, map->sgt.sgl, map->sgt.orig_nents,
- edgetpu_host_dma_dir(map->dir), map->dma_attrs);
+ dma_unmap_sg_attrs(etdev->dev, map->sgt.sgl, map->sgt.orig_nents, map->dir, map->dma_attrs);
}
int edgetpu_mmu_map_iova_sgt(struct edgetpu_dev *etdev, tpu_addr_t iova,
struct sg_table *sgt, enum dma_data_direction dir,
+ u32 mmu_flags,
enum edgetpu_context_id context_id)
{
- const int prot = __dma_dir_to_iommu_prot(edgetpu_host_dma_dir(dir), etdev->dev);
+ const int prot = mmu_flag_to_iommu_prot(mmu_flags, etdev->dev, dir);
const tpu_addr_t orig_iova = iova;
struct scatterlist *sg;
int i;
@@ -510,7 +482,7 @@ tpu_addr_t edgetpu_mmu_tpu_map(struct edgetpu_dev *etdev, dma_addr_t down_addr,
struct iommu_domain *default_domain =
iommu_get_domain_for_dev(etdev->dev);
phys_addr_t paddr;
- int prot = __dma_dir_to_iommu_prot(dir, etdev->dev);
+ int prot = mmu_flag_to_iommu_prot(mmu_flags, etdev->dev, dir);
domain = get_domain_by_context_id(etdev, context_id);
/*
@@ -551,8 +523,7 @@ void edgetpu_mmu_tpu_unmap(struct edgetpu_dev *etdev, tpu_addr_t tpu_addr,
}
tpu_addr_t edgetpu_mmu_tpu_map_sgt(struct edgetpu_dev *etdev,
- struct sg_table *sgt,
- enum dma_data_direction dir,
+ struct sg_table *sgt, enum dma_data_direction dir,
enum edgetpu_context_id context_id,
u32 mmu_flags)
{
@@ -562,7 +533,7 @@ tpu_addr_t edgetpu_mmu_tpu_map_sgt(struct edgetpu_dev *etdev,
phys_addr_t paddr;
dma_addr_t iova, cur_iova;
size_t size;
- int prot = __dma_dir_to_iommu_prot(dir, etdev->dev);
+ int prot = mmu_flag_to_iommu_prot(mmu_flags, etdev->dev, dir);
struct scatterlist *sg;
int ret;
int i;
diff --git a/drivers/edgetpu/edgetpu-internal.h b/drivers/edgetpu/edgetpu-internal.h
index aec91d0..122a3e7 100644
--- a/drivers/edgetpu/edgetpu-internal.h
+++ b/drivers/edgetpu/edgetpu-internal.h
@@ -36,7 +36,7 @@
#include "edgetpu-usage-stats.h"
#define get_dev_for_logging(etdev) \
- ((etdev)->etiface->etcdev ? (etdev)->etiface->etcdev : (etdev)->dev)
+ ((etdev)->etiface && (etdev)->etiface->etcdev ? (etdev)->etiface->etcdev : (etdev)->dev)
#define etdev_err(etdev, fmt, ...) dev_err(get_dev_for_logging(etdev), fmt, ##__VA_ARGS__)
#define etdev_warn(etdev, fmt, ...) \
@@ -56,17 +56,7 @@
dev_warn_once(get_dev_for_logging(etdev), fmt, ##__VA_ARGS__)
/* The number of TPU tiles in an edgetpu chip */
-#ifdef CONFIG_EDGETPU_FPGA
-#define EDGETPU_NTILES 4
-#else
#define EDGETPU_NTILES 16
-#endif
-
-/* Up to 7 concurrent device groups / workloads per device. */
-#define EDGETPU_NGROUPS 7
-
-/* 1 context per VII/group plus 1 for KCI */
-#define EDGETPU_NCONTEXTS (EDGETPU_NGROUPS + 1)
/*
* Common-layer context IDs for non-secure TPU access, translated to chip-
@@ -75,8 +65,7 @@
enum edgetpu_context_id {
EDGETPU_CONTEXT_INVALID = -1,
EDGETPU_CONTEXT_KCI = 0, /* TPU firmware/kernel ID 0 */
- EDGETPU_CONTEXT_VII_BASE = 1, /* groups 0-6 IDs 1-7 */
- /* contexts 8 and above not yet allocated */
+	EDGETPU_CONTEXT_VII_BASE = 1, /* group IDs start at 1 and run up to (number of contexts - 1) */
/* A bit mask to mark the context is an IOMMU domain token */
EDGETPU_CONTEXT_DOMAIN_TOKEN = 1 << 30,
};
@@ -88,6 +77,7 @@ struct edgetpu_coherent_mem {
dma_addr_t dma_addr; /* DMA handle for downstream IOMMU, if any */
tpu_addr_t tpu_addr; /* DMA handle for TPU internal IOMMU, if any */
u64 host_addr; /* address mapped on host for debugging */
+ u64 phys_addr; /* physical address, if available */
size_t size;
#ifdef CONFIG_X86
bool is_set_uc; /* memory has been marked uncached on X86 */
@@ -150,7 +140,7 @@ struct edgetpu_list_device_client {
struct edgetpu_client *client;
};
-/* Macro to loop through etdev->clients (hold clients_lock prior). */
+/* loop through etdev->clients (hold clients_lock prior). */
#define for_each_list_device_client(etdev, c) \
list_for_each_entry(c, &etdev->clients, list)
@@ -185,6 +175,7 @@ enum edgetpu_dev_state {
struct edgetpu_dev {
struct device *dev; /* platform/pci bus device */
uint num_ifaces; /* Number of device interfaces */
+ uint num_cores; /* Number of cores */
/*
* Array of device interfaces
* First element is the default interface
@@ -359,6 +350,8 @@ void edgetpu_free_coherent(struct edgetpu_dev *etdev,
struct edgetpu_coherent_mem *mem,
enum edgetpu_context_id context_id);
+/* Checks if @file belongs to edgetpu driver */
+bool is_edgetpu_file(struct file *file);
/* External drivers can hook up to edgetpu driver using these calls. */
int edgetpu_open(struct edgetpu_dev_iface *etiface, struct file *file);
@@ -389,7 +382,9 @@ int edgetpu_device_add(struct edgetpu_dev *etdev,
const struct edgetpu_iface_params *iface_params,
uint num_ifaces);
void edgetpu_device_remove(struct edgetpu_dev *etdev);
+/* Registers IRQ. */
int edgetpu_register_irq(struct edgetpu_dev *etdev, int irq);
+/* Reverts edgetpu_register_irq */
void edgetpu_unregister_irq(struct edgetpu_dev *etdev, int irq);
/* Core -> Device FS API */
@@ -482,4 +477,10 @@ int edgetpu_chip_acquire_ext_mailbox(struct edgetpu_client *client,
int edgetpu_chip_release_ext_mailbox(struct edgetpu_client *client,
struct edgetpu_ext_mailbox_ioctl *args);
+/*
+ * Chip specific function to get indexes of external mailbox based on
+ * @mbox_type
+ */
+int edgetpu_chip_get_ext_mailbox_index(u32 mbox_type, u32 *start, u32 *end);
+
#endif /* __EDGETPU_INTERNAL_H__ */
diff --git a/drivers/edgetpu/edgetpu-iremap-pool.c b/drivers/edgetpu/edgetpu-iremap-pool.c
index 91b8fea..3858fc0 100644
--- a/drivers/edgetpu/edgetpu-iremap-pool.c
+++ b/drivers/edgetpu/edgetpu-iremap-pool.c
@@ -10,7 +10,6 @@
#include <linux/genalloc.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
#include "edgetpu-internal.h"
@@ -24,7 +23,6 @@ struct edgetpu_mempool {
tpu_addr_t base_tpu_addr;
phys_addr_t base_phys_addr;
size_t granule;
- struct mutex lock;
};
int edgetpu_iremap_pool_create(struct edgetpu_dev *etdev, void *base_vaddr,
@@ -44,8 +42,6 @@ int edgetpu_iremap_pool_create(struct edgetpu_dev *etdev, void *base_vaddr,
if (!pool)
return -ENOMEM;
- mutex_init(&pool->lock);
-
pool->gen_pool = gen_pool_create(ilog2(granule), -1);
if (!pool->gen_pool) {
kfree(pool);
@@ -88,21 +84,21 @@ int edgetpu_iremap_alloc(struct edgetpu_dev *etdev, size_t size,
if (!etmempool)
return edgetpu_alloc_coherent(etdev, size, mem, context_id);
- mutex_lock(&etmempool->lock);
+
size = __ALIGN_KERNEL(size, etmempool->granule);
addr = gen_pool_alloc(etmempool->gen_pool, size);
- if (!addr) {
- mutex_unlock(&etmempool->lock);
+ if (!addr)
return -ENOMEM;
- }
+
mem->vaddr = (void *)addr;
offset = mem->vaddr - etmempool->base_vaddr;
mem->dma_addr = etmempool->base_dma_addr + offset;
mem->tpu_addr = etmempool->base_tpu_addr + offset;
+ mem->phys_addr = etmempool->base_phys_addr + offset;
mem->size = size;
- etdev_dbg(etdev, "%s @ %llx IOVA = %llx size = %zu",
- __func__, (u64)mem->vaddr, mem->dma_addr, size);
- mutex_unlock(&etmempool->lock);
+ etdev_dbg(etdev, "%s @ %pK IOVA = %#llx size = %zu",
+ __func__, mem->vaddr, mem->dma_addr, size);
+
return 0;
}
@@ -116,13 +112,12 @@ void edgetpu_iremap_free(struct edgetpu_dev *etdev,
edgetpu_free_coherent(etdev, mem, context_id);
return;
}
- mutex_lock(&etmempool->lock);
+
etdev_dbg(etdev, "%s @ %llx IOVA = %llx size = %zu",
__func__, (u64)mem->vaddr, mem->dma_addr, mem->size);
gen_pool_free(etmempool->gen_pool, (unsigned long)mem->vaddr,
mem->size);
mem->vaddr = NULL;
- mutex_unlock(&etmempool->lock);
}
int edgetpu_iremap_mmap(struct edgetpu_dev *etdev, struct vm_area_struct *vma,
diff --git a/drivers/edgetpu/edgetpu-kci.c b/drivers/edgetpu/edgetpu-kci.c
index 1f04ecb..bfa356f 100644
--- a/drivers/edgetpu/edgetpu-kci.c
+++ b/drivers/edgetpu/edgetpu-kci.c
@@ -10,6 +10,7 @@
#include <linux/circ_buf.h>
#include <linux/device.h>
#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h> /* memcpy */
@@ -29,9 +30,10 @@
#define QUEUE_SIZE MAX_QUEUE_SIZE
/* Timeout for KCI responses from the firmware (milliseconds) */
-#if IS_ENABLED(CONFIG_EDGETPU_FPGA)
-/* Set extra ludicrously high to 60 seconds for (slow) Palladium emulation. */
-#define KCI_TIMEOUT (60000)
+#ifdef EDGETPU_KCI_TIMEOUT
+
+#define KCI_TIMEOUT EDGETPU_KCI_TIMEOUT
+
#elif IS_ENABLED(CONFIG_EDGETPU_TEST)
/* fake-firmware could respond in a short time */
#define KCI_TIMEOUT (200)
@@ -105,7 +107,7 @@ edgetpu_reverse_kci_consume_response(struct edgetpu_dev *etdev,
edgetpu_handle_job_lockup(etdev, resp->retval);
break;
default:
- etdev_warn(etdev, "%s: Unrecognized KCI request: 0x%x\n",
+ etdev_warn(etdev, "%s: Unrecognized KCI request: %#x\n",
__func__, resp->code);
}
}
@@ -294,7 +296,7 @@ static struct edgetpu_kci_response_element *edgetpu_kci_fetch_responses(
CIRCULAR_QUEUE_REAL_INDEX(tail) >= size)) {
etdev_err_ratelimited(
kci->mailbox->etdev,
- "Invalid response queue tail: 0x%x\n", tail);
+ "Invalid response queue tail: %#x\n", tail);
break;
}
@@ -469,7 +471,7 @@ int edgetpu_kci_init(struct edgetpu_mailbox_manager *mgr,
kci->cmd_queue = kci->cmd_queue_mem.vaddr;
mutex_init(&kci->cmd_queue_lock);
- etdev_dbg(mgr->etdev, "%s: cmdq kva=%pK iova=0x%llx dma=%pad", __func__,
+ etdev_dbg(mgr->etdev, "%s: cmdq kva=%pK iova=%#llx dma=%pad", __func__,
kci->cmd_queue_mem.vaddr, kci->cmd_queue_mem.tpu_addr,
&kci->cmd_queue_mem.dma_addr);
@@ -482,7 +484,7 @@ int edgetpu_kci_init(struct edgetpu_mailbox_manager *mgr,
}
kci->resp_queue = kci->resp_queue_mem.vaddr;
spin_lock_init(&kci->resp_queue_lock);
- etdev_dbg(mgr->etdev, "%s: rspq kva=%pK iova=0x%llx dma=%pad", __func__,
+ etdev_dbg(mgr->etdev, "%s: rspq kva=%pK iova=%#llx dma=%pad", __func__,
kci->resp_queue_mem.vaddr, kci->resp_queue_mem.tpu_addr,
&kci->resp_queue_mem.dma_addr);
@@ -714,14 +716,14 @@ static int edgetpu_kci_send_cmd_with_data(struct edgetpu_kci *kci,
return ret;
memcpy(mem.vaddr, data, size);
- etdev_dbg(etdev, "%s: map kva=%pK iova=0x%llx dma=%pad", __func__, mem.vaddr, mem.tpu_addr,
+ etdev_dbg(etdev, "%s: map kva=%pK iova=%#llx dma=%pad", __func__, mem.vaddr, mem.tpu_addr,
&mem.dma_addr);
cmd->dma.address = mem.tpu_addr;
cmd->dma.size = size;
ret = edgetpu_kci_send_cmd(kci, cmd);
edgetpu_iremap_free(etdev, &mem, EDGETPU_CONTEXT_KCI);
- etdev_dbg(etdev, "%s: unmap kva=%pK iova=0x%llx dma=%pad", __func__, mem.vaddr,
+ etdev_dbg(etdev, "%s: unmap kva=%pK iova=%#llx dma=%pad", __func__, mem.vaddr,
mem.tpu_addr, &mem.dma_addr);
return ret;
}
@@ -852,7 +854,7 @@ enum edgetpu_fw_flavor edgetpu_kci_fw_info(struct edgetpu_kci *kci,
flavor = fw_info->fw_flavor;
break;
default:
- etdev_dbg(etdev, "unrecognized fw flavor 0x%x\n",
+ etdev_dbg(etdev, "unrecognized fw flavor %#x\n",
fw_info->fw_flavor);
}
} else {
@@ -953,17 +955,21 @@ void edgetpu_kci_mappings_show(struct edgetpu_dev *etdev, struct seq_file *s)
if (!kci || !kci->mailbox)
return;
- seq_printf(s, "kci context %u:\n", EDGETPU_CONTEXT_KCI);
- seq_printf(s, " 0x%llx %lu cmdq - %pad\n",
+ seq_printf(s, "kci context mbox %u:\n", EDGETPU_CONTEXT_KCI);
+ seq_printf(s, " %#llx %lu cmdq - %pad\n",
kci->cmd_queue_mem.tpu_addr,
- QUEUE_SIZE *
- edgetpu_kci_queue_element_size(MAILBOX_CMD_QUEUE)
- / PAGE_SIZE, &kci->cmd_queue_mem.dma_addr);
- seq_printf(s, " 0x%llx %lu rspq - %pad\n",
+ DIV_ROUND_UP(
+ QUEUE_SIZE *
+ edgetpu_kci_queue_element_size(MAILBOX_CMD_QUEUE),
+ PAGE_SIZE),
+ &kci->cmd_queue_mem.dma_addr);
+ seq_printf(s, " %#llx %lu rspq - %pad\n",
kci->resp_queue_mem.tpu_addr,
- QUEUE_SIZE *
- edgetpu_kci_queue_element_size(MAILBOX_RESP_QUEUE)
- / PAGE_SIZE, &kci->resp_queue_mem.dma_addr);
+ DIV_ROUND_UP(
+ QUEUE_SIZE *
+ edgetpu_kci_queue_element_size(MAILBOX_RESP_QUEUE),
+ PAGE_SIZE),
+ &kci->resp_queue_mem.dma_addr);
edgetpu_telemetry_mappings_show(etdev, s);
edgetpu_firmware_mappings_show(etdev, s);
}
@@ -1040,12 +1046,25 @@ int edgetpu_kci_notify_throttling(struct edgetpu_dev *etdev, u32 level)
.flags = level,
},
};
- int ret;
if (!etdev->kci)
return -ENODEV;
- ret = edgetpu_kci_send_cmd(etdev->kci, &cmd);
- return ret;
+ return edgetpu_kci_send_cmd(etdev->kci, &cmd);
}
+
+/*
+ * Asks the firmware to block (@block true) or unblock (@block false) bus
+ * speed modulation by sending KCI_CODE_BLOCK_BUS_SPEED_CONTROL; the flag is
+ * carried in the command's dma.flags field.
+ *
+ * Returns -ENODEV if KCI is not initialized, otherwise the result of
+ * edgetpu_kci_send_cmd().
+ */
+int edgetpu_kci_block_bus_speed_control(struct edgetpu_dev *etdev, bool block)
+{
+	struct edgetpu_command_element cmd = {
+		.code = KCI_CODE_BLOCK_BUS_SPEED_CONTROL,
+		.dma = {
+			.flags = (u32) block,
+		},
+	};
+
+	if (!etdev->kci)
+		return -ENODEV;
+
+	return edgetpu_kci_send_cmd(etdev->kci, &cmd);
+}
diff --git a/drivers/edgetpu/edgetpu-kci.h b/drivers/edgetpu/edgetpu-kci.h
index 2893f20..acdf31a 100644
--- a/drivers/edgetpu/edgetpu-kci.h
+++ b/drivers/edgetpu/edgetpu-kci.h
@@ -109,6 +109,7 @@ enum edgetpu_kci_code {
KCI_CODE_FIRMWARE_INFO = 11,
KCI_CODE_GET_USAGE = 12,
KCI_CODE_NOTIFY_THROTTLING = 13,
+ KCI_CODE_BLOCK_BUS_SPEED_CONTROL = 14,
};
/*
@@ -393,4 +394,11 @@ void edgetpu_kci_cancel_work_queues(struct edgetpu_kci *kci);
*/
int edgetpu_kci_notify_throttling(struct edgetpu_dev *etdev, u32 level);
+/*
+ * Request the firmware to {un}block modulating bus clock speeds
+ *
+ * Used to prevent conflicts when sending a thermal policy request
+ */
+int edgetpu_kci_block_bus_speed_control(struct edgetpu_dev *etdev, bool block);
+
#endif /* __EDGETPU_KCI_H__ */
diff --git a/drivers/edgetpu/edgetpu-mailbox.c b/drivers/edgetpu/edgetpu-mailbox.c
index 261eb52..a950e2c 100644
--- a/drivers/edgetpu/edgetpu-mailbox.c
+++ b/drivers/edgetpu/edgetpu-mailbox.c
@@ -6,6 +6,7 @@
*/
#include <asm/page.h>
+#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
@@ -448,7 +449,7 @@ int edgetpu_mailbox_init_vii(struct edgetpu_vii *vii,
}
etdev_dbg(group->etdev,
- "%s: mbox %u cmdq iova=0x%llx dma=%pad\n",
+ "%s: mbox %u cmdq iova=%#llx dma=%pad\n",
__func__, mailbox->mailbox_id, vii->cmd_queue_mem.tpu_addr,
&vii->cmd_queue_mem.dma_addr);
ret = edgetpu_mailbox_alloc_queue(group->etdev, mailbox,
@@ -464,7 +465,7 @@ int edgetpu_mailbox_init_vii(struct edgetpu_vii *vii,
}
etdev_dbg(group->etdev,
- "%s: mbox %u rspq iova=0x%llx dma=%pad\n",
+ "%s: mbox %u rspq iova=%#llx dma=%pad\n",
__func__, mailbox->mailbox_id, vii->resp_queue_mem.tpu_addr,
&vii->resp_queue_mem.dma_addr);
mailbox->internal.group = edgetpu_device_group_get(group);
@@ -507,10 +508,6 @@ int edgetpu_mailbox_alloc_queue(struct edgetpu_dev *etdev,
u32 size = unit * queue_size;
int ret;
- /* checks integer overflow */
- if (queue_size > SIZE_MAX / unit)
- return -ENOMEM;
-
/* Align queue size to page size for TPU MMU map. */
size = __ALIGN_KERNEL(size, PAGE_SIZE);
ret = edgetpu_iremap_alloc(etdev, size, mem,
@@ -846,10 +843,10 @@ static bool edgetpu_mailbox_external_check_range(struct edgetpu_mailbox_manager
static int edgetpu_mailbox_external_alloc(struct edgetpu_device_group *group,
struct edgetpu_external_mailbox_req *ext_mailbox_req)
{
- u32 i, j = 0;
+ u32 i, j = 0, bmap, start, end;
struct edgetpu_mailbox_manager *mgr = group->etdev->mailbox_manager;
struct edgetpu_mailbox *mailbox;
- int ret = 0, c = 0, count;
+ int ret = 0, count;
struct edgetpu_external_mailbox *ext_mailbox;
struct edgetpu_mailbox_attr attr;
unsigned long flags;
@@ -857,7 +854,10 @@ static int edgetpu_mailbox_external_alloc(struct edgetpu_device_group *group,
if (!ext_mailbox_req)
return -EINVAL;
- count = ext_mailbox_req->count;
+ ret = edgetpu_mailbox_validate_attr(&ext_mailbox_req->attr);
+ if (ret)
+ return ret;
+
attr = ext_mailbox_req->attr;
if (!edgetpu_mailbox_external_check_range(mgr, ext_mailbox_req->start,
@@ -868,6 +868,9 @@ static int edgetpu_mailbox_external_alloc(struct edgetpu_device_group *group,
if (!ext_mailbox)
return -ENOMEM;
+ bmap = ext_mailbox_req->mbox_map;
+ count = __sw_hweight32(bmap);
+
ext_mailbox->descriptors =
kcalloc(count, sizeof(struct edgetpu_mailbox_descriptor), GFP_KERNEL);
if (!ext_mailbox->descriptors) {
@@ -876,31 +879,42 @@ static int edgetpu_mailbox_external_alloc(struct edgetpu_device_group *group,
}
ext_mailbox->attr = attr;
- ext_mailbox->count = count;
ext_mailbox->etdev = group->etdev;
+ ext_mailbox->mbox_type = ext_mailbox_req->mbox_type;
+
+ start = ext_mailbox_req->start;
+ end = ext_mailbox_req->end;
write_lock_irqsave(&mgr->mailboxes_lock, flags);
- for (i = ext_mailbox_req->start; i <= ext_mailbox_req->end; i++) {
- if (!mgr->mailboxes[i])
- c++;
- }
- if (c < count) {
- ret = -EBUSY;
- goto unlock;
+ while (bmap) {
+ i = ffs(bmap) + start - 1;
+ if (i > end) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ if (mgr->mailboxes[i]) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+ bmap = bmap & (bmap - 1);
}
- for (i = ext_mailbox_req->start; i <= ext_mailbox_req->end && j < count; i++) {
- if (!mgr->mailboxes[i]) {
- mailbox = edgetpu_mailbox_create_locked(mgr, i);
- if (!IS_ERR(mailbox)) {
- mgr->mailboxes[i] = mailbox;
- ext_mailbox->descriptors[j++].mailbox = mailbox;
- } else {
- goto release;
- }
+ bmap = ext_mailbox_req->mbox_map;
+ while (bmap) {
+ i = ffs(bmap) + start - 1;
+ mailbox = edgetpu_mailbox_create_locked(mgr, i);
+ if (!IS_ERR(mailbox)) {
+ mgr->mailboxes[i] = mailbox;
+ ext_mailbox->descriptors[j++].mailbox = mailbox;
+ } else {
+ ret = PTR_ERR(mailbox);
+ goto release;
}
+ bmap = bmap & (bmap - 1);
}
+ ext_mailbox->count = j;
+
ret = edgetpu_mailbox_external_alloc_queue_batch(ext_mailbox);
if (ret)
goto release;
diff --git a/drivers/edgetpu/edgetpu-mailbox.h b/drivers/edgetpu/edgetpu-mailbox.h
index c4b1318..1d284e6 100644
--- a/drivers/edgetpu/edgetpu-mailbox.h
+++ b/drivers/edgetpu/edgetpu-mailbox.h
@@ -88,10 +88,17 @@ struct edgetpu_mailbox_descriptor {
edgetpu_queue_mem resp_queue_mem;
};
+enum edgetpu_ext_mailbox_type {
+ EDGETPU_EXTERNAL_MAILBOX_TYPE_DSP,
+ EDGETPU_EXTERNAL_MAILBOX_TYPE_AOC,
+};
+
/* Structure to hold multiple external mailboxes allocated for a device group. */
struct edgetpu_external_mailbox {
/* Number of external mailboxes allocated for a device group. */
- int count;
+ u32 count;
+ /* Type of external mailbox */
+ enum edgetpu_ext_mailbox_type mbox_type;
/* Leader of device group. */
struct edgetpu_dev *etdev;
/* Array of external mailboxes info with length @count. */
@@ -104,8 +111,8 @@ struct edgetpu_external_mailbox {
struct edgetpu_external_mailbox_req {
uint start; /* starting index of external mailbox in mailbox_manager */
uint end; /* end index of external mailbox in mailbox_manager */
- /* number of mailboxes to be allocated, should be less or equal to (end - start + 1) */
- uint count;
+ uint mbox_map; /* bitmap of mailbox indexes to be allocated */
+ enum edgetpu_ext_mailbox_type mbox_type; /* Type of external mailbox */
struct edgetpu_mailbox_attr attr; /* mailbox attribute for allocation */
};
diff --git a/drivers/edgetpu/edgetpu-mapping.c b/drivers/edgetpu/edgetpu-mapping.c
index 136cf29..fbffcce 100644
--- a/drivers/edgetpu/edgetpu-mapping.c
+++ b/drivers/edgetpu/edgetpu-mapping.c
@@ -152,3 +152,21 @@ void edgetpu_mappings_show(struct edgetpu_mapping_root *mappings,
edgetpu_mapping_unlock(mappings);
}
+
+/*
+ * Sums map_size over every mapping in the red-black tree rooted at
+ * @mappings and returns the total in bytes.
+ *
+ * Takes the mapping lock for the duration of the tree walk.
+ */
+size_t edgetpu_mappings_total_size(struct edgetpu_mapping_root *mappings)
+{
+	struct rb_node *node;
+	size_t total = 0;
+
+	edgetpu_mapping_lock(mappings);
+
+	for (node = rb_first(&mappings->rb); node; node = rb_next(node)) {
+		struct edgetpu_mapping *map =
+			container_of(node, struct edgetpu_mapping, node);
+
+		total += map->map_size;
+	}
+
+	edgetpu_mapping_unlock(mappings);
+	return total;
+}
diff --git a/drivers/edgetpu/edgetpu-mapping.h b/drivers/edgetpu/edgetpu-mapping.h
index e3a0dc9..8f075aa 100644
--- a/drivers/edgetpu/edgetpu-mapping.h
+++ b/drivers/edgetpu/edgetpu-mapping.h
@@ -21,6 +21,7 @@
#endif
#include "edgetpu-internal.h"
+#include "edgetpu-mmu.h"
struct edgetpu_mapping_root {
struct rb_root rb;
@@ -39,6 +40,8 @@ struct edgetpu_mapping {
u64 host_address;
u32 die_index; /* this mapping is mapped on the @die_index-th die */
tpu_addr_t device_address;
+ /* Size of buffer mapped in bytes. Always set. */
+ size_t map_size;
/*
* The size used for allocating @alloc_iova in bytes. This field may be
* set by edgetpu_mmu_map().
@@ -144,24 +147,36 @@ void edgetpu_mapping_clear(struct edgetpu_mapping_root *mappings);
void edgetpu_mappings_show(struct edgetpu_mapping_root *mappings,
struct seq_file *s);
-static inline int __dma_dir_to_iommu_prot(enum dma_data_direction dir, struct device *dev)
+static inline int __dma_dir_to_iommu_prot(enum dma_data_direction dir)
{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
- int prot = dev_is_dma_coherent(dev) ? IOMMU_CACHE : 0;
-#else
- int prot = 0; /* hardcode to non-dma-coherent for prior kernels */
-#endif
-
switch (dir) {
case DMA_BIDIRECTIONAL:
- return prot | IOMMU_READ | IOMMU_WRITE;
+ return IOMMU_READ | IOMMU_WRITE;
case DMA_TO_DEVICE:
- return prot | IOMMU_READ;
+ return IOMMU_READ;
case DMA_FROM_DEVICE:
- return prot | IOMMU_WRITE;
+ return IOMMU_WRITE;
default:
return 0;
}
}
+/*
+ * Returns the IOMMU_* prot bits for a mapping with EDGETPU_MMU_* @mmu_flags
+ * and DMA direction @dir on device @dev.
+ *
+ * IOMMU_CACHE is set only when the caller requested EDGETPU_MMU_COHERENT,
+ * the kernel is >= 5.10 (dev_is_dma_coherent() availability) and @dev is
+ * DMA-coherent; otherwise the mapping is non-coherent.
+ */
+static inline int mmu_flag_to_iommu_prot(u32 mmu_flags, struct device *dev,
+					 enum dma_data_direction dir)
+{
+	int prot = 0; /* default: non-dma-coherent (also for pre-5.10 kernels) */
+
+	if (mmu_flags & EDGETPU_MMU_COHERENT) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
+		prot = dev_is_dma_coherent(dev) ? IOMMU_CACHE : 0;
+#endif
+	}
+	prot |= __dma_dir_to_iommu_prot(dir);
+	return prot;
+}
+
+/* Return total size of mappings under the supplied root. */
+size_t edgetpu_mappings_total_size(struct edgetpu_mapping_root *mappings);
+
#endif /* __EDGETPU_MAPPING_H__ */
diff --git a/drivers/edgetpu/edgetpu-mmu.h b/drivers/edgetpu/edgetpu-mmu.h
index 7cc9ffa..812a05c 100644
--- a/drivers/edgetpu/edgetpu-mmu.h
+++ b/drivers/edgetpu/edgetpu-mmu.h
@@ -37,6 +37,8 @@
#define EDGETPU_MMU_DEVICE (1 << 2)
#define EDGETPU_MMU_DMABUF (2 << 2)
+#define EDGETPU_MMU_COHERENT (1 << 4)
+
/*
* The max possible value of token is (EDGETPU_DOMAIN_TOKEN_END - 1), which
* shouldn't equal or exceed the bit mask EDGETPU_CONTEXT_DOMAIN_TOKEN.
@@ -75,6 +77,11 @@ edgetpu_host_dma_dir(enum dma_data_direction target_dir)
}
}
+static inline enum dma_data_direction map_flag_to_host_dma_dir(edgetpu_map_flag_t flags)
+{
+ return edgetpu_host_dma_dir(flags & EDGETPU_MAP_DIR_MASK);
+}
+
static inline u32 map_to_mmu_flags(edgetpu_map_flag_t flags)
{
u32 ret = 0;
@@ -82,6 +89,7 @@ static inline u32 map_to_mmu_flags(edgetpu_map_flag_t flags)
ret |= IS_MIRRORED(flags) ? EDGETPU_MMU_VDG : EDGETPU_MMU_DIE;
ret |= (flags & EDGETPU_MAP_CPU_NONACCESSIBLE) ? EDGETPU_MMU_64 :
EDGETPU_MMU_32;
+ ret |= (flags & EDGETPU_MAP_COHERENT) ? EDGETPU_MMU_COHERENT : 0;
return ret;
}
@@ -145,7 +153,7 @@ void edgetpu_mmu_unmap(struct edgetpu_dev *dev, struct edgetpu_mapping *map,
*/
int edgetpu_mmu_map_iova_sgt(struct edgetpu_dev *etdev, tpu_addr_t iova,
struct sg_table *sgt, enum dma_data_direction dir,
- enum edgetpu_context_id context_id);
+ u32 mmu_flags, enum edgetpu_context_id context_id);
void edgetpu_mmu_unmap_iova_sgt_attrs(struct edgetpu_dev *etdev,
tpu_addr_t iova, struct sg_table *sgt,
enum dma_data_direction dir,
diff --git a/drivers/edgetpu/edgetpu-mobile-platform.c b/drivers/edgetpu/edgetpu-mobile-platform.c
new file mode 100644
index 0000000..ffa8928
--- /dev/null
+++ b/drivers/edgetpu/edgetpu-mobile-platform.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common platform interfaces for mobile TPU chips.
+ *
+ * Copyright (C) 2021 Google, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/gsa/gsa_tpu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "edgetpu-config.h"
+#include "edgetpu-internal.h"
+#include "edgetpu-iremap-pool.h"
+#include "edgetpu-mmu.h"
+#include "edgetpu-mobile-platform.h"
+#include "edgetpu-telemetry.h"
+#include "mobile-firmware.h"
+#include "mobile-pm.h"
+
+/*
+ * Log and trace buffers at the beginning of the remapped region,
+ * pool memory afterwards.
+ */
+#define EDGETPU_POOL_MEM_OFFSET \
+ ((EDGETPU_TELEMETRY_LOG_BUFFER_SIZE + EDGETPU_TELEMETRY_TRACE_BUFFER_SIZE) * \
+ EDGETPU_NUM_CORES)
+
+static void get_telemetry_mem(struct edgetpu_mobile_platform_dev *etmdev,
+ enum edgetpu_telemetry_type type, struct edgetpu_coherent_mem *mem)
+{
+ int i, offset = type == EDGETPU_TELEMETRY_TRACE ? EDGETPU_TELEMETRY_LOG_BUFFER_SIZE : 0;
+ const size_t size = type == EDGETPU_TELEMETRY_LOG ? EDGETPU_TELEMETRY_LOG_BUFFER_SIZE :
+ EDGETPU_TELEMETRY_TRACE_BUFFER_SIZE;
+
+ for (i = 0; i < etmdev->edgetpu_dev.num_cores; i++) {
+ mem[i].vaddr = etmdev->shared_mem_vaddr + offset;
+ mem[i].dma_addr = EDGETPU_REMAPPED_DATA_ADDR + offset;
+ mem[i].tpu_addr = EDGETPU_REMAPPED_DATA_ADDR + offset;
+ mem[i].host_addr = 0;
+ mem[i].size = size;
+ offset += EDGETPU_TELEMETRY_LOG_BUFFER_SIZE + EDGETPU_TELEMETRY_TRACE_BUFFER_SIZE;
+ }
+}
+
+static void edgetpu_mobile_get_telemetry_mem(struct edgetpu_mobile_platform_dev *etmdev)
+{
+ get_telemetry_mem(etmdev, EDGETPU_TELEMETRY_LOG, etmdev->log_mem);
+ get_telemetry_mem(etmdev, EDGETPU_TELEMETRY_TRACE, etmdev->trace_mem);
+}
+
+static int edgetpu_platform_setup_fw_region(struct edgetpu_mobile_platform_dev *etmdev)
+{
+ struct edgetpu_dev *etdev = &etmdev->edgetpu_dev;
+ struct platform_device *gsa_pdev;
+ struct device *dev = etdev->dev;
+ struct resource r;
+ struct device_node *np;
+ int err;
+ size_t region_map_size = EDGETPU_FW_SIZE_MAX + EDGETPU_REMAPPED_DATA_SIZE;
+
+ np = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!np) {
+ dev_err(dev, "No memory region for firmware");
+ return -ENODEV;
+ }
+
+ err = of_address_to_resource(np, 0, &r);
+ of_node_put(np);
+ if (err) {
+ dev_err(dev, "No memory address assigned to firmware region");
+ return err;
+ }
+
+ if (resource_size(&r) < region_map_size) {
+ dev_err(dev, "Memory region for firmware too small (%zu bytes needed, got %llu)",
+ region_map_size, resource_size(&r));
+ return -ENOSPC;
+ }
+
+ /* Get GSA device from device tree */
+ np = of_parse_phandle(dev->of_node, "gsa-device", 0);
+ if (!np) {
+ dev_warn(dev, "No gsa-device in device tree. Authentication not available");
+ } else {
+ gsa_pdev = of_find_device_by_node(np);
+ if (!gsa_pdev) {
+ dev_err(dev, "GSA device not found");
+ of_node_put(np);
+ return -ENODEV;
+ }
+ etmdev->gsa_dev = get_device(&gsa_pdev->dev);
+ of_node_put(np);
+ }
+
+ etmdev->fw_region_paddr = r.start;
+ etmdev->fw_region_size = EDGETPU_FW_SIZE_MAX;
+
+ etmdev->shared_mem_vaddr = memremap(r.start + EDGETPU_REMAPPED_DATA_OFFSET,
+ EDGETPU_REMAPPED_DATA_SIZE, MEMREMAP_WC);
+ if (!etmdev->shared_mem_vaddr) {
+ dev_err(dev, "Shared memory remap failed");
+ if (etmdev->gsa_dev)
+ put_device(etmdev->gsa_dev);
+ return -EINVAL;
+ }
+ etmdev->shared_mem_paddr = r.start + EDGETPU_REMAPPED_DATA_OFFSET;
+
+ return 0;
+}
+
+static void edgetpu_platform_cleanup_fw_region(struct edgetpu_mobile_platform_dev *etmdev)
+{
+ if (etmdev->gsa_dev) {
+ gsa_unload_tpu_fw_image(etmdev->gsa_dev);
+ put_device(etmdev->gsa_dev);
+ }
+ if (!etmdev->shared_mem_vaddr)
+ return;
+ memunmap(etmdev->shared_mem_vaddr);
+ etmdev->shared_mem_vaddr = NULL;
+}
+
+int edgetpu_chip_setup_mmu(struct edgetpu_dev *etdev)
+{
+ int ret;
+
+ ret = edgetpu_mmu_attach(etdev, NULL);
+ if (ret)
+ dev_err(etdev->dev, "failed to attach IOMMU: %d", ret);
+ return ret;
+}
+
+void edgetpu_chip_remove_mmu(struct edgetpu_dev *etdev)
+{
+ edgetpu_mmu_detach(etdev);
+}
+
+static int edgetpu_platform_parse_ssmt(struct edgetpu_mobile_platform_dev *etmdev)
+{
+ struct edgetpu_dev *etdev = &etmdev->edgetpu_dev;
+ struct platform_device *pdev = to_platform_device(etdev->dev);
+ struct resource *res;
+ int ret;
+ void __iomem *ssmt_base;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ssmt");
+ if (!res) {
+ etdev_warn(etdev, "Failed to find SSMT register base");
+ return -EINVAL;
+ }
+ ssmt_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ssmt_base)) {
+ ret = PTR_ERR(ssmt_base);
+ etdev_warn(etdev, "Failed to map SSMT register base: %d", ret);
+ return ret;
+ }
+ etmdev->ssmt_base = ssmt_base;
+ return 0;
+}
+
+static int edgetpu_platform_setup_irq(struct edgetpu_mobile_platform_dev *etmdev)
+{
+ struct edgetpu_dev *etdev = &etmdev->edgetpu_dev;
+ struct platform_device *pdev = to_platform_device(etdev->dev);
+ int n = platform_irq_count(pdev);
+ int ret;
+ int i;
+
+ etmdev->irq = devm_kmalloc_array(etdev->dev, n, sizeof(*etmdev->irq), GFP_KERNEL);
+ if (!etmdev->irq)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++) {
+ etmdev->irq[i] = platform_get_irq(pdev, i);
+ ret = edgetpu_register_irq(etdev, etmdev->irq[i]);
+ if (ret)
+ goto rollback;
+ }
+ etmdev->n_irq = n;
+ return 0;
+
+rollback:
+ while (i--)
+ edgetpu_unregister_irq(etdev, etmdev->irq[i]);
+ return ret;
+}
+
+static void edgetpu_platform_remove_irq(struct edgetpu_mobile_platform_dev *etmdev)
+{
+ struct edgetpu_dev *etdev = &etmdev->edgetpu_dev;
+ int i;
+
+ for (i = 0; i < etmdev->n_irq; i++)
+ edgetpu_unregister_irq(etdev, etmdev->irq[i]);
+}
+
+static int edgetpu_mobile_platform_probe(struct platform_device *pdev,
+ struct edgetpu_mobile_platform_dev *etmdev)
+{
+ struct device *dev = &pdev->dev;
+ struct edgetpu_dev *etdev = &etmdev->edgetpu_dev;
+ struct resource *r;
+ struct edgetpu_mapped_resource regs;
+ int ret;
+ struct edgetpu_iface_params iface_params[] = {
+ /* Default interface */
+ { .name = NULL },
+ /* Common name for embedded SoC devices */
+ { .name = "edgetpu-soc" },
+ };
+
+ mutex_init(&etmdev->tz_mailbox_lock);
+
+ platform_set_drvdata(pdev, etdev);
+ etdev->dev = dev;
+ etdev->num_cores = EDGETPU_NUM_CORES;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (IS_ERR_OR_NULL(r)) {
+ dev_err(dev, "failed to get memory resource");
+ return -ENODEV;
+ }
+
+ regs.phys = r->start;
+ regs.size = resource_size(r);
+ regs.mem = devm_ioremap_resource(dev, r);
+ if (IS_ERR_OR_NULL(regs.mem)) {
+ dev_err(dev, "failed to map registers");
+ return -ENODEV;
+ }
+
+ mutex_init(&etmdev->platform_pwr.policy_lock);
+ etmdev->platform_pwr.curr_policy = TPU_POLICY_MAX;
+
+ ret = edgetpu_chip_pm_create(etdev);
+ if (ret) {
+ dev_err(dev, "Failed to initialize PM interface: %d", ret);
+ return ret;
+ }
+
+ ret = edgetpu_platform_setup_fw_region(etmdev);
+ if (ret) {
+ dev_err(dev, "setup fw regions failed: %d", ret);
+ goto out_shutdown;
+ }
+
+ ret = edgetpu_iremap_pool_create(etdev,
+ /* Base virtual address (kernel address space) */
+ etmdev->shared_mem_vaddr + EDGETPU_POOL_MEM_OFFSET,
+ /* Base DMA address */
+ EDGETPU_REMAPPED_DATA_ADDR + EDGETPU_POOL_MEM_OFFSET,
+ /* Base TPU address */
+ EDGETPU_REMAPPED_DATA_ADDR + EDGETPU_POOL_MEM_OFFSET,
+ /* Base physical address */
+ etmdev->shared_mem_paddr + EDGETPU_POOL_MEM_OFFSET,
+ /* Size */
+ EDGETPU_REMAPPED_DATA_SIZE - EDGETPU_POOL_MEM_OFFSET,
+ /* Granularity */
+ PAGE_SIZE);
+ if (ret) {
+ dev_err(dev, "failed to initialize remapped memory pool: %d", ret);
+ goto out_cleanup_fw;
+ }
+
+ etdev->mcp_id = -1;
+ etdev->mcp_die_index = 0;
+ ret = edgetpu_device_add(etdev, &regs, iface_params, ARRAY_SIZE(iface_params));
+ if (ret) {
+ dev_err(dev, "edgetpu setup failed: %d", ret);
+ goto out_destroy_iremap;
+ }
+
+ ret = edgetpu_platform_setup_irq(etmdev);
+ if (ret) {
+ dev_err(dev, "IRQ setup failed: %d", ret);
+ goto out_remove_device;
+ }
+
+ ret = edgetpu_platform_parse_ssmt(etmdev);
+ if (ret)
+ dev_warn(dev, "SSMT setup failed (%d). Context isolation not enforced", ret);
+
+ etmdev->log_mem = devm_kcalloc(dev, etdev->num_cores, sizeof(*etmdev->log_mem), GFP_KERNEL);
+ if (!etmdev->log_mem) {
+ ret = -ENOMEM;
+ goto out_remove_irq;
+ }
+
+#if IS_ENABLED(CONFIG_EDGETPU_TELEMETRY_TRACE)
+ etmdev->trace_mem =
+ devm_kcalloc(dev, etdev->num_cores, sizeof(*etmdev->log_mem), GFP_KERNEL);
+ if (!etmdev->trace_mem) {
+ ret = -ENOMEM;
+ goto out_remove_irq;
+ }
+#endif
+
+ edgetpu_mobile_get_telemetry_mem(etmdev);
+ ret = edgetpu_telemetry_init(etdev, etmdev->log_mem, etmdev->trace_mem);
+ if (ret)
+ goto out_remove_irq;
+
+ ret = edgetpu_mobile_firmware_create(etdev);
+ if (ret) {
+ dev_err(dev, "initialize firmware downloader failed: %d", ret);
+ goto out_tel_exit;
+ }
+
+ etdev_dbg(etdev, "Creating thermal device");
+ etdev->thermal = devm_tpu_thermal_create(etdev->dev, etdev);
+
+ if (etmdev->after_probe) {
+ ret = etmdev->after_probe(etmdev);
+ if (ret) {
+ dev_err(dev, "after_probe callback failed: %d", ret);
+ goto out_destroy_fw;
+ }
+ }
+
+ dev_info(dev, "%s edgetpu initialized. Build: %s", etdev->dev_name, GIT_REPO_TAG);
+ /* Turn the device off unless a client request is already received. */
+ edgetpu_pm_shutdown(etdev, false);
+
+ return 0;
+out_destroy_fw:
+ edgetpu_mobile_firmware_destroy(etdev);
+out_tel_exit:
+ edgetpu_telemetry_exit(etdev);
+out_remove_irq:
+ edgetpu_platform_remove_irq(etmdev);
+out_remove_device:
+ edgetpu_device_remove(etdev);
+out_destroy_iremap:
+ edgetpu_iremap_pool_destroy(etdev);
+out_cleanup_fw:
+ edgetpu_platform_cleanup_fw_region(etmdev);
+out_shutdown:
+ dev_dbg(dev, "Probe finished with error %d, powering down", ret);
+ edgetpu_pm_shutdown(etdev, true);
+ return ret;
+}
+
+static int edgetpu_mobile_platform_remove(struct platform_device *pdev)
+{
+ struct edgetpu_dev *etdev = platform_get_drvdata(pdev);
+ struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+
+ if (etmdev->before_remove)
+ etmdev->before_remove(etmdev);
+ edgetpu_mobile_firmware_destroy(etdev);
+ edgetpu_platform_remove_irq(etmdev);
+ edgetpu_pm_get(etdev->pm);
+ edgetpu_telemetry_exit(etdev);
+ edgetpu_device_remove(etdev);
+ edgetpu_iremap_pool_destroy(etdev);
+ edgetpu_platform_cleanup_fw_region(etmdev);
+ edgetpu_pm_put(etdev->pm);
+ edgetpu_pm_shutdown(etdev, true);
+ mobile_pm_destroy(etdev);
+ return 0;
+}
diff --git a/drivers/edgetpu/edgetpu-mobile-platform.h b/drivers/edgetpu/edgetpu-mobile-platform.h
new file mode 100644
index 0000000..65184ae
--- /dev/null
+++ b/drivers/edgetpu/edgetpu-mobile-platform.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common platform interfaces for mobile TPU chips.
+ *
+ * Copyright (C) 2021 Google, Inc.
+ */
+
+#ifndef __EDGETPU_MOBILE_PLATFORM_H__
+#define __EDGETPU_MOBILE_PLATFORM_H__
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <soc/google/exynos_pm_qos.h>
+
+#if IS_ENABLED(CONFIG_GOOGLE_BCL)
+#include <soc/google/bcl.h>
+#endif
+
+#include "edgetpu-config.h"
+#include "edgetpu-internal.h"
+
+#define to_mobile_dev(etdev) container_of(etdev, struct edgetpu_mobile_platform_dev, edgetpu_dev)
+
+struct edgetpu_mobile_platform_pwr {
+ struct dentry *debugfs_dir;
+ struct mutex policy_lock;
+ enum edgetpu_pwr_state curr_policy;
+ struct mutex state_lock;
+ u64 min_state;
+ u64 requested_state;
+ /* INT/MIF requests for memory bandwidth */
+ struct exynos_pm_qos_request int_min;
+ struct exynos_pm_qos_request mif_min;
+ /* BTS */
+ unsigned int performance_scenario;
+ int scenario_count;
+ struct mutex scenario_lock;
+
+ /* LPM callbacks, NULL for chips without LPM */
+ int (*lpm_up)(struct edgetpu_dev *etdev);
+ void (*lpm_down)(struct edgetpu_dev *etdev);
+
+ /* Block shutdown callback, may be NULL */
+ void (*block_down)(struct edgetpu_dev *etdev);
+
+ /* Firmware shutdown callback. Must be implemented */
+ void (*firmware_down)(struct edgetpu_dev *etdev);
+
+ /* Chip-specific setup after the PM interface is created */
+ int (*after_create)(struct edgetpu_dev *etdev);
+
+ /* Chip-specific cleanup before the PM interface is destroyed */
+ int (*before_destroy)(struct edgetpu_dev *etdev);
+
+ /* ACPM set rate callback. Must be implemented */
+ int (*acpm_set_rate)(unsigned int id, unsigned long rate);
+};
+
+struct edgetpu_mobile_platform_dev {
+ /* Generic edgetpu device */
+ struct edgetpu_dev edgetpu_dev;
+ /* Common mobile platform power interface */
+ struct edgetpu_mobile_platform_pwr platform_pwr;
+ /* Physical address of the firmware image */
+ phys_addr_t fw_region_paddr;
+ /* Size of the firmware region */
+ size_t fw_region_size;
+ /* Virtual address of the memory region shared with firmware */
+ void *shared_mem_vaddr;
+ /* Physical address of the memory region shared with firmware */
+ phys_addr_t shared_mem_paddr;
+ /* Size of the shared memory region */
+ size_t shared_mem_size;
+ /* Physical address of the firmware context region */
+ phys_addr_t fw_ctx_paddr;
+ /* Size of the firmware context region */
+ size_t fw_ctx_size;
+ /*
+ * Pointer to GSA device for firmware authentication.
+ * May be NULL if the chip does not support firmware authentication
+ */
+ struct device *gsa_dev;
+ /* Virtual address of the SSMT block for this chip. */
+ void __iomem *ssmt_base;
+ /* Coherent log buffer */
+ struct edgetpu_coherent_mem *log_mem;
+ /* Coherent trace buffer */
+ struct edgetpu_coherent_mem *trace_mem;
+#if IS_ENABLED(CONFIG_GOOGLE_BCL)
+ struct bcl_device *bcl_dev;
+#endif
+ /* Protects TZ Mailbox client pointer */
+ struct mutex tz_mailbox_lock;
+ /* TZ mailbox client */
+ struct edgetpu_client *secure_client;
+
+ /* Length of @irq */
+ int n_irq;
+ /* Array of IRQ numbers */
+ int *irq;
+
+ /* callbacks for chip-dependent implementations */
+
+ /*
+ * Called when common device probing procedure is done.
+ *
+ * Returning a non-zero value fails the probe procedure.
+ *
+ * This callback is optional.
+ */
+ int (*after_probe)(struct edgetpu_mobile_platform_dev *etmdev);
+ /*
+ * Called before common device removal procedure.
+ *
+ * This callback is optional.
+ */
+ void (*before_remove)(struct edgetpu_mobile_platform_dev *etmdev);
+};
+
+#endif /* __EDGETPU_MOBILE_PLATFORM_H__ */
diff --git a/drivers/edgetpu/edgetpu-pm.c b/drivers/edgetpu/edgetpu-pm.c
index 872149f..e6bfa1d 100644
--- a/drivers/edgetpu/edgetpu-pm.c
+++ b/drivers/edgetpu/edgetpu-pm.c
@@ -8,6 +8,7 @@
#include <linux/iopoll.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include "edgetpu-config.h"
#include "edgetpu-internal.h"
@@ -24,10 +25,19 @@
#define SIM_PCHANNEL(...)
#endif
+#define EDGETPU_ASYNC_POWER_DOWN_RETRY_DELAY 200
+
struct edgetpu_pm_private {
const struct edgetpu_pm_handlers *handlers;
struct mutex lock;
+ /* Power up counter. Protected by @lock */
int power_up_count;
+ /* Flag indicating a deferred power down is pending. Protected by @lock */
+ bool power_down_pending;
+ /* Worker to handle async power down retry */
+ struct delayed_work power_down_work;
+ /* Back pointer to parent struct */
+ struct edgetpu_pm *etpm;
};
/*
@@ -91,12 +101,49 @@ int edgetpu_pm_get(struct edgetpu_pm *etpm)
if (!etpm || !etpm->p->handlers || !etpm->p->handlers->power_up)
return 0;
+
mutex_lock(&etpm->p->lock);
+ etpm->p->power_down_pending = false;
ret = edgetpu_pm_get_locked(etpm);
mutex_unlock(&etpm->p->lock);
+
return ret;
}
+/* Caller must hold @etpm->p->lock */
+static void edgetpu_pm_try_power_down(struct edgetpu_pm *etpm)
+{
+ int ret = etpm->p->handlers->power_down(etpm);
+
+ if (ret == -EAGAIN) {
+ etdev_warn(etpm->etdev, "Power down request denied. Retrying in %d ms\n",
+ EDGETPU_ASYNC_POWER_DOWN_RETRY_DELAY);
+ etpm->p->power_down_pending = true;
+ schedule_delayed_work(&etpm->p->power_down_work,
+ msecs_to_jiffies(EDGETPU_ASYNC_POWER_DOWN_RETRY_DELAY));
+ } else {
+ if (ret)
+ etdev_warn(etpm->etdev, "Power down request failed (%d)\n", ret);
+ etpm->p->power_down_pending = false;
+ }
+}
+
+/* Worker for async power down */
+static void edgetpu_pm_async_power_down_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work, work);
+ struct edgetpu_pm_private *p =
+ container_of(dwork, struct edgetpu_pm_private, power_down_work);
+
+ mutex_lock(&p->lock);
+ etdev_info(p->etpm->etdev, "Delayed power down starting\n");
+ if (p->power_down_pending)
+ edgetpu_pm_try_power_down(p->etpm);
+ else
+ etdev_info(p->etpm->etdev, "Delayed power down cancelled\n");
+ mutex_unlock(&p->lock);
+}
+
void edgetpu_pm_put(struct edgetpu_pm *etpm)
{
if (!etpm || !etpm->p->handlers || !etpm->p->handlers->power_down)
@@ -110,7 +157,7 @@ void edgetpu_pm_put(struct edgetpu_pm *etpm)
}
if (!--etpm->p->power_up_count) {
edgetpu_sw_wdt_stop(etpm->etdev);
- etpm->p->handlers->power_down(etpm);
+ edgetpu_pm_try_power_down(etpm);
}
etdev_dbg(etpm->etdev, "%s: %d\n", __func__, etpm->p->power_up_count);
mutex_unlock(&etpm->p->lock);
@@ -138,6 +185,8 @@ int edgetpu_pm_create(struct edgetpu_dev *etdev,
goto out_free_etpm;
}
+ INIT_DELAYED_WORK(&etpm->p->power_down_work, edgetpu_pm_async_power_down_work);
+ etpm->p->etpm = etpm;
etpm->p->handlers = handlers;
etpm->etdev = etdev;
@@ -167,6 +216,8 @@ void edgetpu_pm_destroy(struct edgetpu_dev *etdev)
return;
if (etdev->pm->p) {
handlers = etdev->pm->p->handlers;
+ etdev->pm->p->power_down_pending = false;
+ cancel_delayed_work_sync(&etdev->pm->p->power_down_work);
if (handlers && handlers->before_destroy)
handlers->before_destroy(etdev->pm);
kfree(etdev->pm->p);
@@ -181,6 +232,7 @@ void edgetpu_pm_shutdown(struct edgetpu_dev *etdev, bool force)
if (!etpm)
return;
+
mutex_lock(&etpm->p->lock);
/* someone is using the device */
diff --git a/drivers/edgetpu/edgetpu-pm.h b/drivers/edgetpu/edgetpu-pm.h
index aef35f6..5f44ab4 100644
--- a/drivers/edgetpu/edgetpu-pm.h
+++ b/drivers/edgetpu/edgetpu-pm.h
@@ -28,7 +28,7 @@ struct edgetpu_pm_handlers {
/* Platform-specific power up. Nesting is handled at generic layer */
int (*power_up)(struct edgetpu_pm *etpm);
/* Platform-specific power down. Nesting is handled at generic layer */
- void (*power_down)(struct edgetpu_pm *etpm);
+ int (*power_down)(struct edgetpu_pm *etpm);
};
struct edgetpu_pm {
diff --git a/drivers/edgetpu/edgetpu-shared-fw.c b/drivers/edgetpu/edgetpu-shared-fw.c
deleted file mode 100644
index 1a6bcb9..0000000
--- a/drivers/edgetpu/edgetpu-shared-fw.c
+++ /dev/null
@@ -1,360 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Edge TPU shared firmware management.
- *
- * Copyright (C) 2020 Google, Inc.
- */
-
-#include <linux/device.h>
-#include <linux/firmware.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/refcount.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-
-#include "edgetpu-firmware-util.h"
-#include "edgetpu-internal.h"
-#include "edgetpu-shared-fw.h"
-
-struct edgetpu_shared_fw_buffer {
- /*
- * Shared firmware buffer is managed by `global.firmware_list`, so that
- * each data member is protected by `global.lock`.
- */
- struct list_head list;
- /*
- * Counting for devices holding the buffer. We can only release the data
- * buffer if there is no device nor sysfs holding the firmware.
- *
- * Even when the reference count atomically decreased down to 0, there's
- * a chance that someone is traversing list and trying to read this
- * `ref`. So `ref` must still be protected by `glock.lock` in this
- * case.
- */
- refcount_t ref;
- /*
- * Indicates if this buffer is loaded by sysfs.
- *
- * Reference count caused by sysfs load should be exactly 1, and we can
- * only unload firmware by sysfs if already loaded by sysfs.
- */
- bool is_sysfs_loaded;
- /* Firmware name, the same as that loaded by request_firmware() API. */
- const char *name;
- void *vaddr;
- /* The size of buffer is aligned to `global.init_data.size_align`. */
- size_t size;
-};
-
-const char *
-edgetpu_shared_fw_buffer_name(const struct edgetpu_shared_fw_buffer *buffer)
-{
- return buffer->name;
-}
-
-void *
-edgetpu_shared_fw_buffer_vaddr(const struct edgetpu_shared_fw_buffer *buffer)
-{
- return buffer->vaddr;
-}
-
-size_t
-edgetpu_shared_fw_buffer_size(const struct edgetpu_shared_fw_buffer *buffer)
-{
- return buffer->size;
-}
-
-/*
- * Lock protected global data.
- *
- * global.lock is required for invoking _locked functions in this file.
- */
-static struct {
- struct mutex lock;
- struct edgetpu_shared_fw_init_data init_data;
- struct list_head firmware_list;
-} global = {
- .lock = __MUTEX_INITIALIZER(global.lock),
- .firmware_list = LIST_HEAD_INIT(global.firmware_list),
-};
-
-#define for_each_shared_fw_buffer(buffer) \
- list_for_each_entry(buffer, &global.firmware_list, list)
-#define for_each_shared_fw_buffer_safe(cur_buf, nxt_buf) \
- list_for_each_entry_safe(cur_buf, nxt_buf, &global.firmware_list, list)
-
-static struct edgetpu_shared_fw_buffer *
-edgetpu_shared_fw_find_locked(const char *name)
-{
- struct edgetpu_shared_fw_buffer *buffer;
-
- for_each_shared_fw_buffer(buffer) {
- if (!strcmp(name, buffer->name))
- return buffer;
- }
- return NULL;
-}
-
-int
-edgetpu_shared_fw_init(const struct edgetpu_shared_fw_init_data *init_data)
-{
- if (!list_empty(&global.firmware_list)) {
- pr_err("%s: already initialized with firmware loaded.\n",
- __func__);
- return -EINVAL;
- }
-
- global.init_data = *init_data;
- return 0;
-}
-
-void edgetpu_shared_fw_exit(void)
-{
- struct edgetpu_shared_fw_buffer *cur_buf, *nxt_buf;
-
- mutex_lock(&global.lock);
-
- if (!list_empty(&global.firmware_list))
- pr_warn("%s: firmware not released on exiting\n", __func__);
-
- for_each_shared_fw_buffer_safe(cur_buf, nxt_buf)
- list_del(&cur_buf->list);
-
- /*
- * TODO(b/152573549): release all firmwares besides clearing the list.
- * We have to add a handler for forced stop/unload on loading/getting
- * firmware.
- */
-
- mutex_unlock(&global.lock);
-}
-
-static struct edgetpu_shared_fw_buffer *
-edgetpu_shared_fw_get_locked(struct edgetpu_shared_fw_buffer *buffer)
-{
- if (!buffer)
- return NULL;
- if (!refcount_inc_not_zero(&buffer->ref))
- return NULL;
- return buffer;
-}
-
-struct edgetpu_shared_fw_buffer *
-edgetpu_shared_fw_get(struct edgetpu_shared_fw_buffer *buffer)
-{
- mutex_lock(&global.lock);
- buffer = edgetpu_shared_fw_get_locked(buffer);
- mutex_unlock(&global.lock);
- return buffer;
-}
-
-struct edgetpu_shared_fw_buffer *
-edgetpu_shared_fw_get_by_name(const char *name)
-{
- struct edgetpu_shared_fw_buffer *buffer;
-
- mutex_lock(&global.lock);
- buffer = edgetpu_shared_fw_get_locked(
- edgetpu_shared_fw_find_locked(name));
- mutex_unlock(&global.lock);
- return buffer;
-}
-
-static struct edgetpu_shared_fw_buffer *
-edgetpu_shared_fw_load_locked(const char *name, struct edgetpu_dev *etdev)
-{
- int ret;
- const struct firmware *fw;
- size_t aligned_size;
- struct edgetpu_shared_fw_buffer *buffer;
-
- buffer = edgetpu_shared_fw_get_locked(
- edgetpu_shared_fw_find_locked(name));
- if (buffer) {
- pr_debug("%s: found shared fw image %s\n", __func__, name);
- return buffer;
- }
-
- pr_debug("%s: shared fw image %s not found, requesting\n",
- __func__, name);
- ret = request_firmware(&fw, name, etdev ? etdev->etiface->etcdev : NULL);
- if (ret)
- goto out;
-
- aligned_size = ALIGN(fw->size, global.init_data.size_align);
-
- buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
- if (!buffer) {
- ret = -ENOMEM;
- goto out_release_firmware;
- }
-
- buffer->name = kstrdup(name, GFP_KERNEL);
- if (!buffer->name) {
- ret = -ENOMEM;
- goto out_kfree_buffer;
- }
-
- /* Allocated in page alignment for mmu and dma mapping. */
- if (aligned_size < PAGE_SIZE)
- buffer->vaddr = kmalloc_order(aligned_size, GFP_KERNEL, 1);
- else
- buffer->vaddr = kmalloc(aligned_size, GFP_KERNEL);
- if (!buffer->vaddr) {
- ret = -ENOMEM;
- goto out_kfree_buffer_name;
- }
- memcpy(buffer->vaddr, fw->data, fw->size);
- release_firmware(fw);
-
- buffer->size = aligned_size;
- refcount_set(&buffer->ref, 1);
-
- list_add(&buffer->list, &global.firmware_list);
- return buffer;
-
-out_kfree_buffer_name:
- kfree(buffer->name);
-out_kfree_buffer:
- kfree(buffer);
-out_release_firmware:
- release_firmware(fw);
-out:
- return ERR_PTR(ret);
-}
-
-struct edgetpu_shared_fw_buffer *edgetpu_shared_fw_load(
- const char *name, struct edgetpu_dev *etdev)
-{
- struct edgetpu_shared_fw_buffer *buffer;
-
- mutex_lock(&global.lock);
- buffer = edgetpu_shared_fw_load_locked(name, etdev);
- mutex_unlock(&global.lock);
- return buffer;
-}
-
-static void
-edgetpu_shared_fw_put_locked(struct edgetpu_shared_fw_buffer *buffer)
-{
- if (!buffer)
- return;
-
- /*
- * buffer->ref IS protected by global.lock. See also `ref` in `struct
- * edgetpu_shared_fw_buffer`.
- */
- if (refcount_dec_and_test(&buffer->ref)) {
- kfree(buffer->vaddr);
- kfree(buffer->name);
- list_del(&buffer->list);
- kfree(buffer);
- }
-}
-
-void edgetpu_shared_fw_put(struct edgetpu_shared_fw_buffer *buffer)
-{
- mutex_lock(&global.lock);
- edgetpu_shared_fw_put_locked(buffer);
- mutex_unlock(&global.lock);
-}
-
-static ssize_t shared_fw_load_store(struct device_driver *drv,
- const char *buf, size_t count)
-{
- struct edgetpu_shared_fw_buffer *buffer;
- char *name;
- ssize_t ret;
-
- name = edgetpu_fwutil_name_from_attr_buf(buf);
- if (IS_ERR(name))
- return PTR_ERR(name);
-
- mutex_lock(&global.lock);
-
- /*
- * TODO(b/152573549): reload firmware to read the latest firmware on
- * filesystem.
- */
-
- buffer = edgetpu_shared_fw_load_locked(name, NULL);
- if (IS_ERR(buffer)) {
- ret = PTR_ERR(buffer);
- goto out_mutex_unlock;
- }
-
- if (buffer->is_sysfs_loaded)
- edgetpu_shared_fw_put_locked(buffer);
- else
- buffer->is_sysfs_loaded = true;
-
- ret = count;
-
-out_mutex_unlock:
- mutex_unlock(&global.lock);
- kfree(name);
- return ret;
-}
-static DRIVER_ATTR_WO(shared_fw_load);
-
-static ssize_t shared_fw_unload_store(struct device_driver *drv,
- const char *buf, size_t count)
-{
- struct edgetpu_shared_fw_buffer *buffer;
- char *name;
- ssize_t ret;
-
- name = edgetpu_fwutil_name_from_attr_buf(buf);
- if (IS_ERR(name))
- return PTR_ERR(name);
-
- mutex_lock(&global.lock);
-
- buffer = edgetpu_shared_fw_find_locked(name);
- if (!buffer || !buffer->is_sysfs_loaded) {
- ret = -ENOENT;
- goto out_mutex_unlock;
- }
- buffer->is_sysfs_loaded = false;
- edgetpu_shared_fw_put_locked(buffer);
- ret = count;
-
-out_mutex_unlock:
- mutex_unlock(&global.lock);
- kfree(name);
- return ret;
-}
-static DRIVER_ATTR_WO(shared_fw_unload);
-
-static struct driver_attribute *driver_attrs[] = {
- &driver_attr_shared_fw_load,
- &driver_attr_shared_fw_unload,
- NULL,
-};
-
-int edgetpu_shared_fw_add_driver_attrs(struct device_driver *driver)
-{
- struct driver_attribute **driver_attr;
- int ret;
-
- for (driver_attr = driver_attrs; *driver_attr; driver_attr++) {
- ret = driver_create_file(driver, *driver_attr);
- if (ret)
- goto out_remove_driver_attrs;
- }
- return 0;
-
-out_remove_driver_attrs:
- while (--driver_attr >= driver_attrs)
- driver_remove_file(driver, *driver_attr);
- return ret;
-}
-
-void edgetpu_shared_fw_remove_driver_attrs(struct device_driver *driver)
-{
- struct driver_attribute **driver_attr;
-
- for (driver_attr = driver_attrs; *driver_attr; driver_attr++)
- driver_remove_file(driver, *driver_attr);
-}
diff --git a/drivers/edgetpu/edgetpu-shared-fw.h b/drivers/edgetpu/edgetpu-shared-fw.h
deleted file mode 100644
index 101ad79..0000000
--- a/drivers/edgetpu/edgetpu-shared-fw.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Implements utilities for shared firmware management of EdgeTPU.
- *
- * Copyright (C) 2020 Google, Inc.
- */
-#ifndef __EDGETPU_SHARED_FW_H__
-#define __EDGETPU_SHARED_FW_H__
-
-#include <linux/device.h>
-
-#include "edgetpu-internal.h"
-
-struct edgetpu_shared_fw_buffer;
-
-/*
- * name for this firmware in null terminated string, the same as which loaded by
- * linux request_firmware API
- */
-const char *
-edgetpu_shared_fw_buffer_name(const struct edgetpu_shared_fw_buffer *buffer);
-/* host address for this firmware */
-void *
-edgetpu_shared_fw_buffer_vaddr(const struct edgetpu_shared_fw_buffer *buffer);
-/* size in bytes for this firmware */
-size_t
-edgetpu_shared_fw_buffer_size(const struct edgetpu_shared_fw_buffer *buffer);
-
-struct edgetpu_shared_fw_init_data {
- /* firmware size alignment in bytes */
- size_t size_align;
-};
-
-/* Initializes structures for shared firmware management. */
-int
-edgetpu_shared_fw_init(const struct edgetpu_shared_fw_init_data *init_data);
-/* Finalizes structures for shared firmware management. */
-void edgetpu_shared_fw_exit(void);
-
-/*
- * Load reference counted shared firmware from file system. Increase reference
- * count by 1 if the firmware is already loaded before.
- *
- * Firmware loaded by this function should be released by
- * edgetpu_shared_fw_put().
- *
- * @name: firmware path to be loaded
- * @etdev: requesting edgetpu_dev, if any, for logging
- */
-struct edgetpu_shared_fw_buffer *edgetpu_shared_fw_load(
- const char *name, struct edgetpu_dev *etdev);
-
-/*
- * Increase the reference count of the buffer by 1.
- *
- * returns the buffer, behave the same as other *_get/put() functions
- */
-struct edgetpu_shared_fw_buffer *
-edgetpu_shared_fw_get(struct edgetpu_shared_fw_buffer *buffer);
-/*
- * Find the shared firmware by name and increase the reference count of the
- * found buffer by 1.
- *
- * returns NULL on error or not found
- */
-struct edgetpu_shared_fw_buffer *
-edgetpu_shared_fw_get_by_name(const char *name);
-
-/*
- * Decrease the reference count by 1 and free the shared buffer if its
- * reference count reaches 0.
- */
-void edgetpu_shared_fw_put(struct edgetpu_shared_fw_buffer *buffer);
-
-/*
- * (Add/Remove) driver-wide sysfs attributes for development and debug.
- */
-int edgetpu_shared_fw_add_driver_attrs(struct device_driver *driver);
-void edgetpu_shared_fw_remove_driver_attrs(struct device_driver *driver);
-
-#endif /* __EDGETPU_SHARED_FW_H__ */
diff --git a/drivers/edgetpu/edgetpu-telemetry.c b/drivers/edgetpu/edgetpu-telemetry.c
index abc9095..f18cef8 100644
--- a/drivers/edgetpu/edgetpu-telemetry.c
+++ b/drivers/edgetpu/edgetpu-telemetry.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -105,7 +106,7 @@ static void telemetry_unset_event(struct edgetpu_dev *etdev,
static void copy_with_wrap(struct edgetpu_telemetry_header *header, void *dest,
u32 length, u32 size, void *start)
{
- const u32 wrap_bit = EDGETPU_TELEMETRY_WRAP_BIT;
+ const u32 wrap_bit = size + sizeof(*header);
u32 remaining = 0;
u32 head = header->head & (wrap_bit - 1);
@@ -242,9 +243,9 @@ static void telemetry_mappings_show(struct edgetpu_telemetry *tel,
if (!tel->inited)
return;
- seq_printf(s, " 0x%llx %lu %s 0x%llx %pad\n",
+ seq_printf(s, " %#llx %lu %s %#llx %pad\n",
tel->coherent_mem.tpu_addr,
- tel->coherent_mem.size / PAGE_SIZE, tel->name,
+ DIV_ROUND_UP(tel->coherent_mem.size, PAGE_SIZE), tel->name,
tel->coherent_mem.host_addr, &tel->coherent_mem.dma_addr);
}
@@ -287,12 +288,10 @@ static int telemetry_mmap_buffer(struct edgetpu_dev *etdev,
return ret;
}
-static int telemetry_init(struct edgetpu_dev *etdev,
- struct edgetpu_telemetry *tel, const char *name,
- struct edgetpu_coherent_mem *mem,
+static int telemetry_init(struct edgetpu_dev *etdev, struct edgetpu_telemetry *tel,
+ const char *name, struct edgetpu_coherent_mem *mem, const size_t size,
void (*fallback)(struct edgetpu_telemetry *))
{
- const size_t size = EDGETPU_TELEMETRY_BUFFER_SIZE;
const u32 flags = EDGETPU_MMU_DIE | EDGETPU_MMU_32 | EDGETPU_MMU_HOST;
void *vaddr;
dma_addr_t dma_addr;
@@ -375,31 +374,45 @@ int edgetpu_telemetry_init(struct edgetpu_dev *etdev,
struct edgetpu_coherent_mem *log_mem,
struct edgetpu_coherent_mem *trace_mem)
{
- int ret;
+ int ret, i;
if (!etdev->telemetry)
return -ENODEV;
- ret = telemetry_init(etdev, &etdev->telemetry->log, "telemetry_log",
- log_mem, edgetpu_fw_log);
- if (ret)
- return ret;
+
+ for (i = 0; i < etdev->num_cores; i++) {
+ ret = telemetry_init(etdev, &etdev->telemetry[i].log, "telemetry_log",
+ log_mem ? &log_mem[i] : NULL,
+ EDGETPU_TELEMETRY_LOG_BUFFER_SIZE, edgetpu_fw_log);
+ if (ret)
+ break;
#if IS_ENABLED(CONFIG_EDGETPU_TELEMETRY_TRACE)
- ret = telemetry_init(etdev, &etdev->telemetry->trace, "telemetry_trace",
- trace_mem, edgetpu_fw_trace);
- if (ret) {
- telemetry_exit(etdev, &etdev->telemetry->log);
- return ret;
- }
+ ret = telemetry_init(etdev, &etdev->telemetry[i].trace, "telemetry_trace",
+ trace_mem ? &trace_mem[i] : NULL,
+ EDGETPU_TELEMETRY_TRACE_BUFFER_SIZE, edgetpu_fw_trace);
+ if (ret)
+ break;
#endif
- return 0;
+ }
+
+ if (ret)
+ edgetpu_telemetry_exit(etdev);
+
+ return ret;
}
void edgetpu_telemetry_exit(struct edgetpu_dev *etdev)
{
+ int i;
+
if (!etdev->telemetry)
return;
- telemetry_exit(etdev, &etdev->telemetry->trace);
- telemetry_exit(etdev, &etdev->telemetry->log);
+
+ for (i = 0; i < etdev->num_cores; i++) {
+#if IS_ENABLED(CONFIG_EDGETPU_TELEMETRY_TRACE)
+ telemetry_exit(etdev, &etdev->telemetry[i].trace);
+#endif
+ telemetry_exit(etdev, &etdev->telemetry[i].log);
+ }
}
int edgetpu_telemetry_kci(struct edgetpu_dev *etdev)
@@ -408,14 +421,17 @@ int edgetpu_telemetry_kci(struct edgetpu_dev *etdev)
if (!etdev->telemetry)
return -ENODEV;
- ret = telemetry_kci(etdev, &etdev->telemetry->log,
- edgetpu_kci_map_log_buffer);
+
+ /* Core 0 will notify other cores. */
+ ret = telemetry_kci(etdev, &etdev->telemetry[0].log, edgetpu_kci_map_log_buffer);
if (ret)
return ret;
- ret = telemetry_kci(etdev, &etdev->telemetry->trace,
- edgetpu_kci_map_trace_buffer);
+
+#if IS_ENABLED(CONFIG_EDGETPU_TELEMETRY_TRACE)
+ ret = telemetry_kci(etdev, &etdev->telemetry[0].trace, edgetpu_kci_map_trace_buffer);
if (ret)
return ret;
+#endif
return 0;
}
@@ -423,59 +439,87 @@ int edgetpu_telemetry_kci(struct edgetpu_dev *etdev)
int edgetpu_telemetry_set_event(struct edgetpu_dev *etdev,
enum edgetpu_telemetry_type type, u32 eventfd)
{
+ int i, ret;
+
if (!etdev->telemetry)
return -ENODEV;
- return telemetry_set_event(
- etdev, select_telemetry(etdev->telemetry, type), eventfd);
+
+ for (i = 0; i < etdev->num_cores; i++) {
+ ret = telemetry_set_event(etdev, select_telemetry(&etdev->telemetry[i], type),
+ eventfd);
+ if (ret) {
+ edgetpu_telemetry_unset_event(etdev, type);
+ return ret;
+ }
+ }
+
+ return 0;
}
void edgetpu_telemetry_unset_event(struct edgetpu_dev *etdev,
enum edgetpu_telemetry_type type)
{
+ int i;
+
if (!etdev->telemetry)
return;
- telemetry_unset_event(etdev, select_telemetry(etdev->telemetry, type));
+
+ for (i = 0; i < etdev->num_cores; i++)
+ telemetry_unset_event(etdev, select_telemetry(&etdev->telemetry[i], type));
}
void edgetpu_telemetry_irq_handler(struct edgetpu_dev *etdev)
{
+ int i;
+
if (!etdev->telemetry)
return;
- telemetry_irq_handler(etdev, &etdev->telemetry->log);
- telemetry_irq_handler(etdev, &etdev->telemetry->trace);
+
+ for (i = 0; i < etdev->num_cores; i++) {
+ telemetry_irq_handler(etdev, &etdev->telemetry[i].log);
+#if IS_ENABLED(CONFIG_EDGETPU_TELEMETRY_TRACE)
+ telemetry_irq_handler(etdev, &etdev->telemetry[i].trace);
+#endif
+ }
}
void edgetpu_telemetry_mappings_show(struct edgetpu_dev *etdev,
struct seq_file *s)
{
+ int i;
+
if (!etdev->telemetry)
return;
- telemetry_mappings_show(&etdev->telemetry->log, s);
- telemetry_mappings_show(&etdev->telemetry->trace, s);
+
+ for (i = 0; i < etdev->num_cores; i++) {
+ telemetry_mappings_show(&etdev->telemetry[i].log, s);
+#if IS_ENABLED(CONFIG_EDGETPU_TELEMETRY_TRACE)
+ telemetry_mappings_show(&etdev->telemetry[i].trace, s);
+#endif
+ }
}
-int edgetpu_mmap_telemetry_buffer(struct edgetpu_dev *etdev,
- enum edgetpu_telemetry_type type,
- struct vm_area_struct *vma)
+int edgetpu_mmap_telemetry_buffer(struct edgetpu_dev *etdev, enum edgetpu_telemetry_type type,
+ struct vm_area_struct *vma, int core_id)
{
if (!etdev->telemetry)
return -ENODEV;
- return telemetry_mmap_buffer(
- etdev, select_telemetry(etdev->telemetry, type), vma);
+ return telemetry_mmap_buffer(etdev, select_telemetry(&etdev->telemetry[core_id], type),
+ vma);
}
-void edgetpu_telemetry_inc_mmap_count(struct edgetpu_dev *etdev,
- enum edgetpu_telemetry_type type)
+void edgetpu_telemetry_inc_mmap_count(struct edgetpu_dev *etdev, enum edgetpu_telemetry_type type,
+ int core_id)
{
if (!etdev->telemetry)
return;
- telemetry_inc_mmap_count(select_telemetry(etdev->telemetry, type), 1);
+ telemetry_inc_mmap_count(select_telemetry(&etdev->telemetry[core_id], type), 1);
}
-void edgetpu_telemetry_dec_mmap_count(struct edgetpu_dev *etdev,
- enum edgetpu_telemetry_type type)
+void edgetpu_telemetry_dec_mmap_count(struct edgetpu_dev *etdev, enum edgetpu_telemetry_type type,
+ int core_id)
{
if (!etdev->telemetry)
return;
- telemetry_inc_mmap_count(select_telemetry(etdev->telemetry, type), -1);
+ telemetry_inc_mmap_count(select_telemetry(&etdev->telemetry[core_id], type), -1);
}
diff --git a/drivers/edgetpu/edgetpu-telemetry.h b/drivers/edgetpu/edgetpu-telemetry.h
index a25d002..2c89aff 100644
--- a/drivers/edgetpu/edgetpu-telemetry.h
+++ b/drivers/edgetpu/edgetpu-telemetry.h
@@ -27,9 +27,9 @@
#define EDGETPU_FW_DMESG_LOG_LEVEL (EDGETPU_FW_LOG_LEVEL_ERROR)
-#define EDGETPU_TELEMETRY_BUFFER_SIZE (16 * 4096)
-/* assumes buffer size is power of 2 */
-#define EDGETPU_TELEMETRY_WRAP_BIT EDGETPU_TELEMETRY_BUFFER_SIZE
+/* Buffer size must be a power of 2 */
+#define EDGETPU_TELEMETRY_LOG_BUFFER_SIZE (16 * 4096)
+#define EDGETPU_TELEMETRY_TRACE_BUFFER_SIZE (64 * 4096)
enum edgetpu_telemetry_state {
EDGETPU_TELEMETRY_DISABLED = 0,
@@ -99,7 +99,7 @@ struct edgetpu_telemetry_ctx {
/*
* Allocates resources needed for @etdev->telemetry.
*
- * Optionally provide coherent_mem buffers for log and trace.
+ * Optionally provide arrays of etdev->num_cores coherent_mem buffers for log and trace.
* If any of these are NULL, they will be allocated and freed by telemetry code.
*
* Returns 0 on success, or a negative errno on error.
@@ -139,12 +139,11 @@ void edgetpu_telemetry_mappings_show(struct edgetpu_dev *etdev,
struct seq_file *s);
/* Map telemetry buffer into user space. */
-int edgetpu_mmap_telemetry_buffer(struct edgetpu_dev *etdev,
- enum edgetpu_telemetry_type type,
- struct vm_area_struct *vma);
-void edgetpu_telemetry_inc_mmap_count(struct edgetpu_dev *etdev,
- enum edgetpu_telemetry_type type);
-void edgetpu_telemetry_dec_mmap_count(struct edgetpu_dev *etdev,
- enum edgetpu_telemetry_type type);
+int edgetpu_mmap_telemetry_buffer(struct edgetpu_dev *etdev, enum edgetpu_telemetry_type type,
+ struct vm_area_struct *vma, int core_id);
+void edgetpu_telemetry_inc_mmap_count(struct edgetpu_dev *etdev, enum edgetpu_telemetry_type type,
+ int core_id);
+void edgetpu_telemetry_dec_mmap_count(struct edgetpu_dev *etdev, enum edgetpu_telemetry_type type,
+ int core_id);
#endif /* __EDGETPU_TELEMETRY_H__ */
diff --git a/drivers/edgetpu/edgetpu-usage-stats.c b/drivers/edgetpu/edgetpu-usage-stats.c
index 0001210..f796117 100644
--- a/drivers/edgetpu/edgetpu-usage-stats.c
+++ b/drivers/edgetpu/edgetpu-usage-stats.c
@@ -8,42 +8,14 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
+#include "edgetpu-config.h"
#include "edgetpu-internal.h"
#include "edgetpu-kci.h"
#include "edgetpu-usage-stats.h"
-#if IS_ENABLED(CONFIG_ABROLHOS)
-#include "abrolhos-pm.h"
-
-static enum tpu_pwr_state tpu_states_arr[] = {
- TPU_ACTIVE_UUD,
- TPU_ACTIVE_SUD,
- TPU_ACTIVE_UD,
- TPU_ACTIVE_NOM,
- TPU_ACTIVE_OD,
-};
-
-#else /* CONFIG_HERMOSA */
-
-static uint32_t tpu_states_arr[] = {
- 4, /* kActiveMinPower, kActiveVeryLowPower: 400MHz */
- 5, /* kActiveLowPower: 800MHz */
- 6, /* kActive: 950MHz */
-};
-
-static uint32_t tpu_states_display[] = {
- 400,
- 800,
- 950,
-};
-
-#endif /* CONFIG_ABROLHOS */
-
-#define NUM_TPU_STATES ARRAY_SIZE(tpu_states_arr)
-
struct uid_entry {
int32_t uid;
- uint64_t time_in_state[NUM_TPU_STATES];
+ uint64_t time_in_state[EDGETPU_NUM_STATES];
struct hlist_node node;
};
@@ -51,8 +23,8 @@ static int tpu_state_map(uint32_t state)
{
int i;
- for (i = (NUM_TPU_STATES - 1); i >= 0; i--) {
- if (state >= tpu_states_arr[i])
+ for (i = (EDGETPU_NUM_STATES - 1); i >= 0; i--) {
+ if (state >= edgetpu_active_states[i])
return i;
}
@@ -322,13 +294,9 @@ static ssize_t tpu_usage_show(struct device *dev,
/* uid: state0speed state1speed ... */
ret += scnprintf(buf, PAGE_SIZE, "uid:");
- for (i = 0; i < NUM_TPU_STATES; i++)
+ for (i = 0; i < EDGETPU_NUM_STATES; i++)
ret += scnprintf(buf + ret, PAGE_SIZE - ret, " %d",
-#if IS_ENABLED(CONFIG_ABROLHOS)
- tpu_states_arr[i]);
-#else
- tpu_states_display[i]);
-#endif
+ edgetpu_states_display[i]);
ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
@@ -338,7 +306,7 @@ static ssize_t tpu_usage_show(struct device *dev,
ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%d:",
uid_entry->uid);
- for (i = 0; i < NUM_TPU_STATES; i++)
+ for (i = 0; i < EDGETPU_NUM_STATES; i++)
ret += scnprintf(buf + ret, PAGE_SIZE - ret, " %lld",
uid_entry->time_in_state[i]);
diff --git a/drivers/edgetpu/edgetpu.h b/drivers/edgetpu/edgetpu.h
index db6b6b8..6a210fb 100644
--- a/drivers/edgetpu/edgetpu.h
+++ b/drivers/edgetpu/edgetpu.h
@@ -17,6 +17,8 @@
/* mmap offsets for logging and tracing buffers */
#define EDGETPU_MMAP_LOG_BUFFER_OFFSET 0x1B00000
#define EDGETPU_MMAP_TRACE_BUFFER_OFFSET 0x1C00000
+#define EDGETPU_MMAP_LOG1_BUFFER_OFFSET 0x1D00000
+#define EDGETPU_MMAP_TRACE1_BUFFER_OFFSET 0x1E00000
/* EdgeTPU map flag macros */
@@ -40,11 +42,12 @@ typedef __u32 edgetpu_map_flag_t;
/* Offset and mask to set the PBHA bits of IOMMU mappings */
#define EDGETPU_MAP_ATTR_PBHA_SHIFT 5
#define EDGETPU_MAP_ATTR_PBHA_MASK 0xf
+/* Create coherent mapping of the buffer */
+#define EDGETPU_MAP_COHERENT (1u << 9)
/* External mailbox types */
#define EDGETPU_EXT_MAILBOX_TYPE_TZ 1
#define EDGETPU_EXT_MAILBOX_TYPE_GSA 2
-#define EDGETPU_EXT_MAILBOX_TYPE_DSP 3
struct edgetpu_map_ioctl {
__u64 host_address; /* user-space address to be mapped */
@@ -78,7 +81,12 @@ struct edgetpu_map_ioctl {
* 1 = Skip CPU sync.
* Note: This bit is ignored on the map call.
* [8:5] - Value of PBHA bits for IOMMU mappings. For Abrolhos only.
- * [31:9] - RESERVED
+ * [9:9] - Coherent Mapping:
+ * 0 = Create non-coherent mappings of the buffer.
+ * 1 = Create coherent mappings of the buffer.
+ * Note: this attribute may be ignored on platforms where
+ * the TPU is not I/O coherent.
+ * [31:10] - RESERVED
*/
edgetpu_map_flag_t flags;
/*
@@ -289,7 +297,7 @@ struct edgetpu_map_dmabuf_ioctl {
*/
__u64 device_address;
/* A dma-buf FD. */
- int dmabuf_fd;
+ __s32 dmabuf_fd;
/*
* Flags indicating mapping attributes. See edgetpu_map_ioctl.flags for
* details.
diff --git a/drivers/edgetpu/include/linux/acpm_dvfs.h b/drivers/edgetpu/include/linux/acpm_dvfs.h
new file mode 100644
index 0000000..6872abe
--- /dev/null
+++ b/drivers/edgetpu/include/linux/acpm_dvfs.h
@@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Fallback header for systems without Exynos ACPM support.
 *
 * Copyright (C) 2021 Google, Inc.
 */

#ifndef __ACPM_DVFS_H__
#define __ACPM_DVFS_H__

/* Stub: report success so initialization proceeds without ACPM. */
static inline int exynos_acpm_set_init_freq(unsigned int dfs_id, unsigned long freq)
{
	return 0;
}

/* Stub: report success; no DVFS policy to program. */
static inline int exynos_acpm_set_policy(unsigned int id, unsigned long policy)
{
	return 0;
}

#if IS_ENABLED(CONFIG_EDGETPU_TEST)

/*
 * Declarations only under test builds — presumably implemented by test
 * doubles; confirm against the test harness.
 */
int exynos_acpm_set_rate(unsigned int id, unsigned long rate);
unsigned long exynos_acpm_get_rate(unsigned int id, unsigned long dbg_val);

#else /* IS_ENABLED(CONFIG_EDGETPU_TEST) */

/* Stub: rate changes are silently accepted. */
static inline int exynos_acpm_set_rate(unsigned int id, unsigned long rate)
{
	return 0;
}

/* Stub: no clock to query; always reports 0. */
static inline unsigned long exynos_acpm_get_rate(unsigned int id, unsigned long dbg_val)
{
	return 0;
}

#endif /* IS_ENABLED(CONFIG_EDGETPU_TEST) */

#endif /* __ACPM_DVFS_H__ */
diff --git a/drivers/edgetpu/include/linux/gsa/gsa_tpu.h b/drivers/edgetpu/include/linux/gsa/gsa_tpu.h
new file mode 100644
index 0000000..2dc8a79
--- /dev/null
+++ b/drivers/edgetpu/include/linux/gsa/gsa_tpu.h
@@ -0,0 +1,53 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Fallback header for systems without GSA support.
 *
 * Copyright (C) 2021 Google, Inc.
 */

#ifndef __LINUX_GSA_TPU_H
#define __LINUX_GSA_TPU_H

#include <linux/device.h>
#include <linux/types.h>

#if IS_ENABLED(CONFIG_EDGETPU_TEST)

/*
 * Declaration only under test builds — presumably implemented by a test
 * double; confirm against the test harness.
 */
int gsa_load_tpu_fw_image(struct device *gsa, dma_addr_t img_meta, phys_addr_t img_body);

#else /* IS_ENABLED(CONFIG_EDGETPU_TEST) */

/* Stub: report success so firmware loading proceeds without a GSA. */
static inline int gsa_load_tpu_fw_image(struct device *gsa, dma_addr_t img_meta,
					phys_addr_t img_body)
{
	return 0;
}

#endif /* IS_ENABLED(CONFIG_EDGETPU_TEST) */

/* Stub: nothing to unload without a GSA. */
static inline int gsa_unload_tpu_fw_image(struct device *gsa)
{
	return 0;
}

/* TPU states as reported by gsa_send_tpu_cmd(GSA_TPU_GET_STATE). */
enum gsa_tpu_state {
	GSA_TPU_STATE_INACTIVE = 0,
	GSA_TPU_STATE_LOADED,
	GSA_TPU_STATE_RUNNING,
	GSA_TPU_STATE_SUSPENDED,
};

/* Commands accepted by gsa_send_tpu_cmd(). */
enum gsa_tpu_cmd {
	GSA_TPU_GET_STATE = 0,
	GSA_TPU_START,
	GSA_TPU_SUSPEND,
	GSA_TPU_RESUME,
	GSA_TPU_SHUTDOWN,
};

/*
 * Stub: always returns 0 — i.e. GSA_TPU_STATE_INACTIVE for GSA_TPU_GET_STATE
 * and success for the other commands.
 */
static inline int gsa_send_tpu_cmd(struct device *gsa, enum gsa_tpu_cmd cmd)
{
	return 0;
}

#endif /* __LINUX_GSA_TPU_H */
diff --git a/drivers/edgetpu/mobile-debug-dump.c b/drivers/edgetpu/mobile-debug-dump.c
new file mode 100644
index 0000000..3732fbb
--- /dev/null
+++ b/drivers/edgetpu/mobile-debug-dump.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implements methods common to the family of EdgeTPUs for mobile devices to retrieve host side
+ * debug dump segments and report them to SSCD.
+ *
+ * Copyright (C) 2021 Google, Inc.
+ */
+
+#include <linux/mutex.h>
+#include <linux/platform_data/sscoredump.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+
+#include "edgetpu-device-group.h"
+#include "edgetpu-mailbox.h"
+#include "mobile-debug-dump.h"
+
+#include "edgetpu-debug-dump.c"
+
+struct mobile_sscd_mappings_dump *
+mobile_sscd_collect_mappings_segment(struct edgetpu_device_group **groups, size_t num_groups,
+ struct sscd_segment *sscd_seg)
+{
+ struct mobile_sscd_mappings_dump *mappings_dump;
+ struct edgetpu_mapping_root *mappings;
+ struct rb_node *node;
+ void *resized_arr;
+ size_t idx = 0, mappings_num = 0, new_size = 0;
+
+ mappings_dump = kmalloc(sizeof(struct mobile_sscd_mappings_dump), GFP_KERNEL);
+ for (idx = 0; idx < num_groups; idx++) {
+ mutex_lock(&groups[idx]->lock);
+ new_size += groups[idx]->host_mappings.count *
+ sizeof(struct mobile_sscd_mappings_dump);
+ resized_arr = krealloc(mappings_dump, new_size, GFP_KERNEL);
+ if (!resized_arr) {
+ kfree(mappings_dump);
+ mutex_unlock(&groups[idx]->lock);
+ return NULL;
+ }
+
+ mappings = &groups[idx]->host_mappings;
+ for (node = rb_first(&mappings->rb); node; node = rb_next(node)) {
+ struct edgetpu_mapping *map =
+ container_of(node, struct edgetpu_mapping, node);
+
+ mappings_dump[mappings_num].host_address = map->host_address;
+ mappings_dump[mappings_num].device_address = map->device_address;
+ mappings_dump[mappings_num].alloc_iova = map->alloc_iova;
+ mappings_dump[mappings_num].size = (u64)map->alloc_size;
+ mappings_num++;
+ }
+ new_size += groups[idx]->dmabuf_mappings.count *
+ sizeof(struct mobile_sscd_mappings_dump);
+ resized_arr = krealloc(mappings_dump, new_size, GFP_KERNEL);
+ if (!resized_arr) {
+ kfree(mappings_dump);
+ mutex_unlock(&groups[idx]->lock);
+ return NULL;
+ }
+
+ mappings = &groups[idx]->dmabuf_mappings;
+ for (node = rb_first(&mappings->rb); node; node = rb_next(node)) {
+ struct edgetpu_mapping *map =
+ container_of(node, struct edgetpu_mapping, node);
+
+ mappings_dump[mappings_num].host_address = map->host_address;
+ mappings_dump[mappings_num].device_address = map->device_address;
+ mappings_dump[mappings_num].alloc_iova = map->alloc_iova;
+ mappings_dump[mappings_num].size = (u64)map->alloc_size;
+ mappings_num++;
+ }
+ mutex_unlock(&groups[idx]->lock);
+ }
+
+ sscd_seg->addr = mappings_dump;
+ sscd_seg->size = new_size;
+ sscd_seg->vaddr = mappings_dump;
+
+ return mappings_dump;
+}
+
/*
 * Collects the VII command/response queues of all @groups plus the device's
 * KCI command/response queues into @sscd_seg_arr.
 *
 * Groups whose mailbox is detached are skipped (the detached check is done
 * under the group lock), so the return value — the number of segments
 * actually filled — may be less than the 2 * @num_groups + 2 maximum the
 * caller sized @sscd_seg_arr for.
 */
size_t mobile_sscd_collect_cmd_resp_queues(struct edgetpu_dev *etdev,
					   struct edgetpu_device_group **groups, size_t num_groups,
					   struct sscd_segment *sscd_seg_arr)
{
	struct edgetpu_kci *kci;
	size_t idx;
	u16 num_queues = 0;

	// Collect VII cmd and resp queues
	for (idx = 0; idx < num_groups; idx++) {
		mutex_lock(&groups[idx]->lock);
		if (!edgetpu_group_mailbox_detached_locked(groups[idx])) {
			sscd_seg_arr[num_queues].addr =
				(void *)groups[idx]->vii.cmd_queue_mem.vaddr;
			sscd_seg_arr[num_queues].size = groups[idx]->vii.cmd_queue_mem.size;
			sscd_seg_arr[num_queues].paddr =
				(void *)groups[idx]->vii.cmd_queue_mem.tpu_addr;
			sscd_seg_arr[num_queues].vaddr =
				(void *)groups[idx]->vii.cmd_queue_mem.vaddr;
			num_queues++;

			sscd_seg_arr[num_queues].addr =
				(void *)groups[idx]->vii.resp_queue_mem.vaddr;
			sscd_seg_arr[num_queues].size = groups[idx]->vii.resp_queue_mem.size;
			sscd_seg_arr[num_queues].paddr =
				(void *)groups[idx]->vii.resp_queue_mem.tpu_addr;
			sscd_seg_arr[num_queues].vaddr =
				(void *)groups[idx]->vii.resp_queue_mem.vaddr;
			num_queues++;
		}
		mutex_unlock(&groups[idx]->lock);
	}

	// Collect KCI cmd and resp queues; sizes derive from the fixed queue
	// element counts rather than the coherent_mem size.
	kci = etdev->kci;
	sscd_seg_arr[num_queues].addr = (void *)kci->cmd_queue_mem.vaddr;
	sscd_seg_arr[num_queues].size = MAX_QUEUE_SIZE * sizeof(struct edgetpu_command_element);
	sscd_seg_arr[num_queues].paddr = (void *)kci->cmd_queue_mem.tpu_addr;
	sscd_seg_arr[num_queues].vaddr = (void *)kci->cmd_queue_mem.vaddr;
	num_queues++;

	sscd_seg_arr[num_queues].addr = (void *)kci->resp_queue_mem.vaddr;
	sscd_seg_arr[num_queues].size = MAX_QUEUE_SIZE *
					sizeof(struct edgetpu_kci_response_element);
	sscd_seg_arr[num_queues].paddr = (void *)kci->resp_queue_mem.tpu_addr;
	sscd_seg_arr[num_queues].vaddr = (void *)kci->resp_queue_mem.vaddr;
	num_queues++;

	return num_queues;
}
diff --git a/drivers/edgetpu/mobile-debug-dump.h b/drivers/edgetpu/mobile-debug-dump.h
new file mode 100644
index 0000000..9eaf069
--- /dev/null
+++ b/drivers/edgetpu/mobile-debug-dump.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Module that defines structure to retrieve debug dump segments
+ * specific to the family of EdgeTPUs for mobile devices.
+ *
+ * Copyright (C) 2021 Google, Inc.
+ */
+
+#ifndef __MOBILE_DEBUG_DUMP_H__
+#define __MOBILE_DEBUG_DUMP_H__
+
+#include "edgetpu-debug-dump.h"
+
+struct mobile_sscd_info {
+ void *pdata; /* SSCD platform data */
+ void *dev; /* SSCD platform device */
+};
+
+struct mobile_sscd_mappings_dump {
+ u64 host_address;
+ u64 device_address;
+ u64 alloc_iova;
+ u64 size;
+};
+
+struct sscd_segment;
+
+/*
+ * Collects the mapping information of all the host mapping and dmabuf mapping buffers of all
+ * @groups as an array of struct mobile_sscd_mappings_dump and populates the @sscd_seg.
+ *
+ * Returns the pointer to the first element of the mappings dump array. The allocated array should
+ * be freed by the caller after the sscd segment is reported.
+ * Returns NULL in case of failure.
+ */
+struct mobile_sscd_mappings_dump *
+mobile_sscd_collect_mappings_segment(struct edgetpu_device_group **groups, size_t num_groups,
+ struct sscd_segment *sscd_seg);
+
+/*
+ * Collects the VII cmd and resp queues of all @groups that @etdev belongs to and the KCI cmd and
+ * resp queues and populates them as @sscd_seg_arr elements.
+ *
+ * Returns the total number of queues collected since some queues may have been released for groups
+ * with detached mailboxes. The return value is less than or equal to the total number of queues
+ * expected based on @num_groups, i.e. (2 * @num_groups + 2).
+ */
+size_t mobile_sscd_collect_cmd_resp_queues(struct edgetpu_dev *etdev,
+ struct edgetpu_device_group **groups, size_t num_groups,
+ struct sscd_segment *sscd_seg_arr);
+
+#endif /* __MOBILE_DEBUG_DUMP_H__ */
diff --git a/drivers/edgetpu/mobile-firmware.c b/drivers/edgetpu/mobile-firmware.c
new file mode 100644
index 0000000..f021a32
--- /dev/null
+++ b/drivers/edgetpu/mobile-firmware.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Edge TPU firmware management for mobile chipsets.
+ *
+ * Copyright (C) 2021 Google, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/gsa/gsa_tpu.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "edgetpu.h"
+#include "edgetpu-config.h"
+#include "edgetpu-firmware.h"
+#include "edgetpu-firmware-util.h"
+#include "edgetpu-internal.h"
+#include "edgetpu-kci.h"
+#include "edgetpu-mailbox.h"
+#include "edgetpu-mmu.h"
+#include "edgetpu-mobile-platform.h"
+#include "mobile-firmware.h"
+
+static struct mobile_image_config *mobile_firmware_get_image_config(struct edgetpu_dev *etdev)
+{
+ return (struct mobile_image_config *) edgetpu_firmware_get_data(etdev->firmware);
+}
+
+static void mobile_firmware_clear_mappings(struct edgetpu_dev *etdev,
+ struct mobile_image_config *image_config)
+{
+ tpu_addr_t tpu_addr;
+ size_t size;
+ int i;
+
+ for (i = 0; i < image_config->num_iommu_mapping; i++) {
+ tpu_addr = image_config->mappings[i].virt_address;
+ size = CONFIG_TO_SIZE(image_config->mappings[i].image_config_value);
+ edgetpu_mmu_remove_translation(etdev, tpu_addr, size, EDGETPU_CONTEXT_KCI);
+ }
+}
+
+static int mobile_firmware_setup_mappings(struct edgetpu_dev *etdev,
+ struct mobile_image_config *image_config)
+{
+ int i, ret;
+ tpu_addr_t tpu_addr;
+ size_t size;
+ phys_addr_t phys_addr;
+
+ for (i = 0; i < image_config->num_iommu_mapping; i++) {
+ tpu_addr = image_config->mappings[i].virt_address;
+ if (!tpu_addr) {
+ etdev_warn(etdev, "Invalid firmware header\n");
+ goto err;
+ }
+ size = CONFIG_TO_SIZE(image_config->mappings[i].image_config_value);
+ phys_addr = image_config->mappings[i].image_config_value & ~(0xFFF);
+
+ etdev_dbg(etdev, "Adding IOMMU mapping for firmware : %#llx -> %#llx", tpu_addr,
+ phys_addr);
+
+ ret = edgetpu_mmu_add_translation(etdev, tpu_addr, phys_addr, size,
+ IOMMU_READ | IOMMU_WRITE, EDGETPU_CONTEXT_KCI);
+ if (ret) {
+ etdev_err(etdev,
+ "Unable to Map: %d tpu_addr: %#llx phys_addr: %#llx size: %#lx\n",
+ ret, tpu_addr, phys_addr, size);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ while (i--) {
+ tpu_addr = image_config->mappings[i].virt_address;
+ size = CONFIG_TO_SIZE(image_config->mappings[i].image_config_value);
+ edgetpu_mmu_remove_translation(etdev, tpu_addr, size, EDGETPU_CONTEXT_KCI);
+ }
+ return ret;
+}
+
+static void mobile_firmware_clear_ns_mappings(struct edgetpu_dev *etdev,
+ struct mobile_image_config *image_config)
+{
+ tpu_addr_t tpu_addr;
+ size_t size;
+ int i;
+
+ for (i = 0; i < image_config->num_ns_iommu_mappings; i++) {
+ tpu_addr = image_config->ns_iommu_mappings[i] & ~(0xFFF);
+ size = CONFIG_TO_MBSIZE(image_config->ns_iommu_mappings[i]);
+ edgetpu_mmu_remove_translation(etdev, tpu_addr, size, EDGETPU_CONTEXT_KCI);
+ }
+}
+
/*
 * Maps the non-secure IOMMU regions listed in @image_config onto the
 * firmware context carveout, packing them back to back starting at
 * @etmdev->fw_ctx_paddr. Each ns_iommu_mappings entry packs a 4KB-aligned
 * TPU address with a size extracted by CONFIG_TO_MBSIZE.
 *
 * Returns 0 on success; -ENOSPC if the regions exceed the carveout, or the
 * edgetpu_mmu_add_translation() error with already-installed mappings undone.
 */
static int mobile_firmware_setup_ns_mappings(struct edgetpu_dev *etdev,
					     struct mobile_image_config *image_config)
{
	tpu_addr_t tpu_addr;
	size_t size = 0;
	int ret = 0, i;
	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
	phys_addr_t phys_addr = etmdev->fw_ctx_paddr;

	/* First pass: total size of all regions, to validate capacity. */
	for (i = 0; i < image_config->num_ns_iommu_mappings; i++)
		size += CONFIG_TO_MBSIZE(image_config->ns_iommu_mappings[i]);

	if (size > etmdev->fw_ctx_size) {
		etdev_err(etdev, "Insufficient firmware context memory");
		return -ENOSPC;
	}

	/* Second pass: install the mappings, advancing through the carveout. */
	for (i = 0; i < image_config->num_ns_iommu_mappings; i++) {
		size = CONFIG_TO_MBSIZE(image_config->ns_iommu_mappings[i]);
		tpu_addr = image_config->ns_iommu_mappings[i] & ~(0xFFF);
		ret = edgetpu_mmu_add_translation(etdev, tpu_addr, phys_addr,
						  size, IOMMU_READ | IOMMU_WRITE,
						  EDGETPU_CONTEXT_KCI);
		if (ret)
			goto err;
		phys_addr += size;
	}

	return 0;

err:
	/* Unwind the mappings installed before the failure. */
	while (i--) {
		tpu_addr = image_config->ns_iommu_mappings[i] & ~(0xFFF);
		size = CONFIG_TO_MBSIZE(image_config->ns_iommu_mappings[i]);
		edgetpu_mmu_remove_translation(etdev, tpu_addr, size, EDGETPU_CONTEXT_KCI);
	}
	return ret;
}
+
+static int mobile_firmware_after_create(struct edgetpu_firmware *et_fw)
+{
+ /*
+ * Use firmware data to keep a copy of the image config in order
+ * to avoid re-doing IOMMU mapping on each firmware run
+ */
+ struct mobile_image_config *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ edgetpu_firmware_set_data(et_fw, data);
+ return 0;
+}
+
/*
 * Firmware handler "before_destroy": tears down all IOMMU mappings installed
 * from the cached image config and frees the cached copy.
 */
static void mobile_firmware_before_destroy(struct edgetpu_firmware *et_fw)
{
	struct mobile_image_config *image_config;
	struct edgetpu_dev *etdev = et_fw->etdev;

	image_config = mobile_firmware_get_image_config(etdev);

	mobile_firmware_clear_ns_mappings(etdev, image_config);
	/* Per-image mappings are installed only for non-secure firmware. */
	if (image_config->privilege_level == FW_PRIV_LEVEL_NS)
		mobile_firmware_clear_mappings(etdev, image_config);
	edgetpu_firmware_set_data(et_fw, NULL);
	kfree(image_config);
}
+
/*
 * Firmware handler "alloc_buffer": allocates a staging buffer large enough
 * for the firmware carveout contents plus the image header.
 *
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
static int mobile_firmware_alloc_buffer(
	struct edgetpu_firmware *et_fw,
	struct edgetpu_firmware_buffer *fw_buf)
{
	struct edgetpu_dev *etdev = et_fw->etdev;
	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);

	/* Allocate extra space for the image header */
	size_t buffer_size =
		etmdev->fw_region_size + MOBILE_FW_HEADER_SIZE;

	fw_buf->vaddr = vmalloc(buffer_size);
	if (!fw_buf->vaddr) {
		etdev_err(etdev, "%s: failed to allocate buffer (%zu bytes)\n",
			  __func__, buffer_size);
		return -ENOMEM;
	}
	/* vmalloc memory has no bus address; dma_addr stays unused. */
	fw_buf->dma_addr = 0;
	fw_buf->alloc_size = buffer_size;
	fw_buf->used_size_align = 16;
	return 0;
}
+
+static void mobile_firmware_free_buffer(
+ struct edgetpu_firmware *et_fw,
+ struct edgetpu_firmware_buffer *fw_buf)
+{
+ vfree(fw_buf->vaddr);
+ fw_buf->vaddr = NULL;
+ fw_buf->dma_addr = 0;
+ fw_buf->alloc_size = 0;
+ fw_buf->used_size_align = 0;
+}
+
+static void mobile_firmware_save_image_config(struct edgetpu_dev *etdev,
+ struct mobile_image_config *image_config)
+{
+ struct mobile_image_config *saved_image_config = mobile_firmware_get_image_config(etdev);
+
+ memcpy(saved_image_config, image_config, sizeof(*saved_image_config));
+}
+
/*
 * Loads and authenticates the firmware image through the GSA.
 *
 * Queries the current TPU state from the GSA, unloads any previously loaded
 * image, copies the image body (header stripped) into the carveout at
 * @image_vaddr, then hands the header — placed in GSA-coherent memory — to
 * gsa_load_tpu_fw_image() to authenticate the body at
 * @etmdev->fw_region_paddr.
 *
 * Returns 0 on success or a negative errno.
 */
static int mobile_firmware_gsa_authenticate(struct edgetpu_mobile_platform_dev *etmdev,
					    struct edgetpu_firmware_buffer *fw_buf,
					    struct mobile_image_config *image_config,
					    void *image_vaddr)
{
	struct edgetpu_dev *etdev = &etmdev->edgetpu_dev;
	void *header_vaddr;
	dma_addr_t header_dma_addr;
	int tpu_state;
	int ret = 0;

	tpu_state = gsa_send_tpu_cmd(etmdev->gsa_dev, GSA_TPU_GET_STATE);

	/* Values below GSA_TPU_STATE_INACTIVE (= 0) are GSA error codes. */
	if (tpu_state < GSA_TPU_STATE_INACTIVE) {
		etdev_err(etdev, "GSA failed to retrieve current status: %d\n", tpu_state);
		return tpu_state;
	}

	etdev_dbg(etdev, "GSA Reports TPU state: %d\n", tpu_state);

	if (tpu_state > GSA_TPU_STATE_INACTIVE) {
		/* A previous image is still loaded; release it first. */
		ret = gsa_unload_tpu_fw_image(etmdev->gsa_dev);
		if (ret) {
			etdev_warn(etdev, "GSA release failed: %d\n", ret);
			return -EIO;
		}
	}

	/* Copy the firmware image to the target location, skipping the header */
	memcpy(image_vaddr, fw_buf->vaddr + MOBILE_FW_HEADER_SIZE,
	       fw_buf->used_size - MOBILE_FW_HEADER_SIZE);

	/* Allocate coherent memory for the image header */
	header_vaddr = dma_alloc_coherent(etmdev->gsa_dev,
					  MOBILE_FW_HEADER_SIZE,
					  &header_dma_addr, GFP_KERNEL);
	if (!header_vaddr) {
		etdev_err(etdev,
			  "Failed to allocate coherent memory for header\n");
		return -ENOMEM;
	}

	memcpy(header_vaddr, fw_buf->vaddr, MOBILE_FW_HEADER_SIZE);
	etdev_dbg(etdev, "Requesting GSA image load. meta = %llX payload = %llX", header_dma_addr,
		  (u64)etmdev->fw_region_paddr);

	ret = gsa_load_tpu_fw_image(etmdev->gsa_dev, header_dma_addr,
				    etmdev->fw_region_paddr);
	if (ret)
		etdev_err(etdev, "GSA authentication failed: %d\n", ret);

	/* Header memory is only needed for the load call itself. */
	dma_free_coherent(etmdev->gsa_dev, MOBILE_FW_HEADER_SIZE, header_vaddr, header_dma_addr);

	return ret;
}
+
/* TODO(b/197074886): remove once rio has GSA support */
/*
 * Programs the instruction-remap CSRs from the kernel: configures the
 * security context/stream IDs, points the remap window at the firmware
 * carveout, sets a 16MB limit, and enables remapping.
 *
 * NOTE(review): every register is written twice at offsets +0 and +8 with
 * distinct stream IDs (sid0/sid1) — presumably one instance per remap
 * channel; confirm against the CSR specification.
 */
static void program_iremap_csr(struct edgetpu_dev *etdev)
{
	const int ctx_id = 0, sid0 = 0x30, sid1 = 0x34;
	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
	phys_addr_t fw_paddr = etmdev->fw_region_paddr;

	edgetpu_dev_write_32(etdev, EDGETPU_REG_INSTRUCTION_REMAP_SECURITY, (ctx_id << 16) | sid0);
	edgetpu_dev_write_32(etdev, EDGETPU_REG_INSTRUCTION_REMAP_SECURITY + 8,
			     (ctx_id << 16) | sid1);
#if defined(ZEBU_SYSMMU_WORKAROUND)
	/*
	 * This is required on ZeBu after b/197718405 is fixed, which forwards all transactions to
	 * the non-secure SysMMU.
	 */
	fw_paddr = EDGETPU_INSTRUCTION_REMAP_BASE;
#endif
	edgetpu_dev_write_32(etdev, EDGETPU_REG_INSTRUCTION_REMAP_NEW_BASE, fw_paddr);
	edgetpu_dev_write_32(etdev, EDGETPU_REG_INSTRUCTION_REMAP_NEW_BASE + 8, fw_paddr);

	edgetpu_dev_write_32(etdev, EDGETPU_REG_INSTRUCTION_REMAP_LIMIT,
			     EDGETPU_INSTRUCTION_REMAP_BASE + SZ_16M);
	edgetpu_dev_write_32(etdev, EDGETPU_REG_INSTRUCTION_REMAP_LIMIT + 8,
			     EDGETPU_INSTRUCTION_REMAP_BASE + SZ_16M);

	/* Writing 1 to the control registers enables the remap window. */
	edgetpu_dev_write_32(etdev, EDGETPU_REG_INSTRUCTION_REMAP_CONTROL, 1);
	edgetpu_dev_write_32(etdev, EDGETPU_REG_INSTRUCTION_REMAP_CONTROL + 8, 1);
}
+
+static int mobile_firmware_prepare_run(struct edgetpu_firmware *et_fw,
+ struct edgetpu_firmware_buffer *fw_buf)
+{
+ struct edgetpu_dev *etdev = et_fw->etdev;
+
+ /* Reset KCI mailbox before starting f/w, don't process anything old.*/
+ edgetpu_mailbox_reset(etdev->kci->mailbox);
+
+ if (IS_ENABLED(CONFIG_RIO))
+ program_iremap_csr(etdev);
+
+ return edgetpu_mobile_firmware_reset_cpu(etdev, false);
+}
+
+static int mobile_firmware_restart(struct edgetpu_firmware *et_fw, bool force_reset)
+{
+ struct edgetpu_dev *etdev = et_fw->etdev;
+
+ /*
+ * We are in a bad state, reset the CPU and hope the device recovers.
+ * Ignore failures in the reset assert request and proceed to reset release.
+ */
+ if (force_reset)
+ edgetpu_mobile_firmware_reset_cpu(etdev, true);
+
+ return edgetpu_mobile_firmware_reset_cpu(etdev, false);
+}
+
/*
 * Firmware handler "setup_buffer": stages the downloaded image into the
 * firmware carveout (via GSA authentication when available, or a plain copy
 * for unauthenticated non-secure firmware), validates that the image fits in
 * the carveout, and refreshes IOMMU mappings when the embedded image config
 * differs from the one cached on the previous load.
 *
 * Returns 0 on success or a negative errno.
 */
static int mobile_firmware_setup_buffer(struct edgetpu_firmware *et_fw,
					struct edgetpu_firmware_buffer *fw_buf)
{
	int ret = 0;
	void *image_vaddr;
	struct edgetpu_dev *etdev = et_fw->etdev;
	struct mobile_image_config *image_config;
	struct mobile_image_config *last_image_config = mobile_firmware_get_image_config(etdev);
	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
	phys_addr_t image_start, image_end, carveout_start, carveout_end;
	bool image_config_changed;

	if (fw_buf->used_size < MOBILE_FW_HEADER_SIZE) {
		etdev_err(etdev, "Invalid buffer size: %zu < %d\n",
			  fw_buf->used_size, MOBILE_FW_HEADER_SIZE);
		return -EINVAL;
	}

	/* Map the physical carveout so the image body can be copied in. */
	image_vaddr = memremap(etmdev->fw_region_paddr,
			       etmdev->fw_region_size, MEMREMAP_WC);
	if (!image_vaddr) {
		etdev_err(etdev, "memremap failed\n");
		return -ENOMEM;
	}

	/* fetch the firmware versions */
	image_config = fw_buf->vaddr + MOBILE_IMAGE_CONFIG_OFFSET;
	memcpy(&etdev->fw_version, &image_config->firmware_versions,
	       sizeof(etdev->fw_version));

	/* Non-zero when the new image config differs from the cached one. */
	image_config_changed = memcmp(image_config, last_image_config, sizeof(*image_config));

	if (etmdev->gsa_dev) {
		ret = mobile_firmware_gsa_authenticate(etmdev, fw_buf, image_config, image_vaddr);
	} else if (image_config->privilege_level == FW_PRIV_LEVEL_NS) {
		etdev_dbg(etdev, "Loading unauthenticated non-secure firmware\n");
		/* Copy the firmware image to the target location, skipping the header */
		memcpy(image_vaddr, fw_buf->vaddr + MOBILE_FW_HEADER_SIZE,
		       fw_buf->used_size - MOBILE_FW_HEADER_SIZE);
	} else {
		/* Privileged firmware cannot be loaded without authentication. */
		etdev_err(etdev,
			  "Cannot load firmware at privilege level %d with no authentication\n",
			  image_config->privilege_level);
		ret = -EINVAL;
	}

	if (ret)
		goto out;

	image_start = (phys_addr_t)image_config->carveout_base;
	image_end = (phys_addr_t)(image_config->firmware_base + image_config->firmware_size - 1);
	carveout_start = etmdev->fw_region_paddr;
	carveout_end = carveout_start + etmdev->fw_region_size - 1;

	/* Image must fit within the carveout */
	if (image_start < carveout_start || image_end > carveout_end) {
		etdev_err(etdev, "Firmware image doesn't fit in carveout\n");
		etdev_err(etdev, "Image config: %pap - %pap\n", &image_start, &image_end);
		etdev_err(etdev, "Carveout: %pap - %pap\n", &carveout_start, &carveout_end);
		ret = -ERANGE;
		goto out;
	}

	if (image_config_changed) {
		/* clear last image mappings */
		if (last_image_config->privilege_level == FW_PRIV_LEVEL_NS)
			mobile_firmware_clear_mappings(etdev, last_image_config);

		/* Per-image mappings are installed only for non-secure firmware. */
		if (image_config->privilege_level == FW_PRIV_LEVEL_NS)
			ret = mobile_firmware_setup_mappings(etdev, image_config);
		if (ret)
			goto out;
		mobile_firmware_clear_ns_mappings(etdev, last_image_config);
		ret = mobile_firmware_setup_ns_mappings(etdev, image_config);
		if (ret) {
			mobile_firmware_clear_mappings(etdev, image_config);
			goto out;
		}
		/* Cache the config so unchanged reloads skip remapping. */
		mobile_firmware_save_image_config(etdev, image_config);
	}
out:
	memunmap(image_vaddr);
	return ret;
}
+
+/* Load firmware for chips that use carveout memory for a single chip. */
+int edgetpu_firmware_chip_load_locked(
+ struct edgetpu_firmware *et_fw,
+ struct edgetpu_firmware_desc *fw_desc, const char *name)
+{
+ int ret;
+ struct edgetpu_dev *etdev = et_fw->etdev;
+ struct device *dev = etdev->dev;
+ const struct firmware *fw;
+ size_t aligned_size;
+
+ ret = request_firmware(&fw, name, dev);
+ if (ret) {
+ etdev_dbg(etdev,
+ "%s: request '%s' failed: %d\n", __func__, name, ret);
+ return ret;
+ }
+
+ aligned_size = ALIGN(fw->size, fw_desc->buf.used_size_align);
+ if (aligned_size > fw_desc->buf.alloc_size) {
+ etdev_dbg(etdev,
+ "%s: firmware buffer too small: alloc size=%#zx, required size=%#zx\n",
+ __func__, fw_desc->buf.alloc_size, aligned_size);
+ ret = -ENOSPC;
+ goto out_release_firmware;
+ }
+
+ memcpy(fw_desc->buf.vaddr, fw->data, fw->size);
+ fw_desc->buf.used_size = aligned_size;
+ /* May return NULL on out of memory, driver must handle properly */
+ fw_desc->buf.name = kstrdup(name, GFP_KERNEL);
+
+out_release_firmware:
+ release_firmware(fw);
+ return ret;
+}
+
+/*
+ * Release resources recorded by edgetpu_firmware_chip_load_locked().
+ * Caller holds the firmware lock.
+ */
+void edgetpu_firmware_chip_unload_locked(
+		struct edgetpu_firmware *et_fw,
+		struct edgetpu_firmware_desc *fw_desc)
+{
+	fw_desc->buf.used_size = 0;
+	kfree(fw_desc->buf.name);
+	fw_desc->buf.name = NULL;
+}
+
+/*
+ * Assert or release the TPU CPU reset.
+ *
+ * Non-secure firmware: write the per-core reset control registers directly.
+ * Otherwise go through GSA (shutdown/start); fails with -ENODEV when
+ * neither path is available.
+ *
+ * Returns 0 on success (positive GSA status codes are collapsed to 0),
+ * negative errno on failure.
+ */
+int edgetpu_mobile_firmware_reset_cpu(struct edgetpu_dev *etdev, bool assert_reset)
+{
+	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+	struct mobile_image_config *image_config = mobile_firmware_get_image_config(etdev);
+	int ret = 0;
+
+	if (image_config->privilege_level == FW_PRIV_LEVEL_NS) {
+		int i;
+
+		for (i = 0; i < EDGETPU_NUM_CORES; i++)
+			edgetpu_dev_write_32_sync(etdev, EDGETPU_REG_RESET_CONTROL + i * 8,
+						  assert_reset ? 1 : 0);
+	} else if (etmdev->gsa_dev) {
+		ret = gsa_send_tpu_cmd(etmdev->gsa_dev,
+				       assert_reset ? GSA_TPU_SHUTDOWN : GSA_TPU_START);
+	} else {
+		ret = -ENODEV;
+	}
+
+	etdev_dbg(etdev, "%s CPU reset result = %d", assert_reset ? "assert" : "release", ret);
+
+	return ret < 0 ? ret : 0;
+}
+
+/* Chip-layer firmware callbacks shared by all mobile EdgeTPU devices. */
+static const struct edgetpu_firmware_chip_data mobile_firmware_chip_data = {
+	.default_firmware_name = EDGETPU_DEFAULT_FIRMWARE_NAME,
+	.after_create = mobile_firmware_after_create,
+	.before_destroy = mobile_firmware_before_destroy,
+	.alloc_buffer = mobile_firmware_alloc_buffer,
+	.free_buffer = mobile_firmware_free_buffer,
+	.setup_buffer = mobile_firmware_setup_buffer,
+	.prepare_run = mobile_firmware_prepare_run,
+	.restart = mobile_firmware_restart,
+};
+
+/* Register the mobile firmware layer with the generic firmware core. */
+int edgetpu_mobile_firmware_create(struct edgetpu_dev *etdev)
+{
+	return edgetpu_firmware_create(etdev, &mobile_firmware_chip_data);
+}
+
+/* Counterpart of edgetpu_mobile_firmware_create(). */
+void edgetpu_mobile_firmware_destroy(struct edgetpu_dev *etdev)
+{
+	edgetpu_firmware_destroy(etdev);
+}
+
+/* Return the address at which the TPU CPU sees its own firmware image. */
+unsigned long edgetpu_chip_firmware_iova(struct edgetpu_dev *etdev)
+{
+	/*
+	 * On mobile platforms, firmware address translation may happen in 1 or 2 stages:
+	 * 1.- Instruction remap registers.
+	 * 2.- IOMMU translation (when not running in GSA privilege).
+	 * In either case, the address seen by the TPU's CPU core will remain constant, and
+	 * equal to the macro below.
+	 */
+	return EDGETPU_INSTRUCTION_REMAP_BASE;
+}
diff --git a/drivers/edgetpu/mobile-firmware.h b/drivers/edgetpu/mobile-firmware.h
index 691eaf5..d17d0c0 100644
--- a/drivers/edgetpu/mobile-firmware.h
+++ b/drivers/edgetpu/mobile-firmware.h
@@ -12,6 +12,13 @@
#include "edgetpu-internal.h"
#include "edgetpu.h"
+#define MAX_IOMMU_MAPPINGS 23
+#define MAX_NS_IOMMU_MAPPINGS 5
+
+#define FW_PRIV_LEVEL_GSA (0)
+#define FW_PRIV_LEVEL_TZ (1)
+#define FW_PRIV_LEVEL_NS (2)
+
/* mobile FW header size */
#define MOBILE_FW_HEADER_SIZE SZ_4K
/* The offset to the signed firmware header. */
@@ -19,6 +26,20 @@
/* The offset to image configuration. */
#define MOBILE_IMAGE_CONFIG_OFFSET (MOBILE_HEADER_OFFSET + 0x160)
+#define CONFIG_TO_SIZE(a) ((1 << ((a) & 0xFFF)) << 12)
+
+#define CONFIG_TO_MBSIZE(a) (((a) & 0xFFF) << 20)
+
+/* One TPU-side IOMMU mapping entry from the signed image configuration. */
+struct iommu_mapping {
+	/* TPU virt address */
+	__u32 virt_address;
+	/*
+	 * Packs a 4K-aligned physical address (upper bits) and a page-order
+	 * size (lower 12 bits) into one 32-bit value; see CONFIG_TO_SIZE().
+	 */
+	__u32 image_config_value;
+};
+
/*
* The image configuration attached to the signed firmware.
*/
@@ -27,6 +48,14 @@ struct mobile_image_config {
__u32 firmware_base;
__u32 firmware_size;
struct edgetpu_fw_version firmware_versions;
+ __u32 config_version;
+ __u32 privilege_level;
+ __u32 remapped_region_start;
+ __u32 remapped_region_end;
+ __u32 num_iommu_mapping;
+ struct iommu_mapping mappings[MAX_IOMMU_MAPPINGS];
+ __u32 num_ns_iommu_mappings;
+ __u32 ns_iommu_mappings[MAX_NS_IOMMU_MAPPINGS];
} __packed;
/*
@@ -46,7 +75,15 @@ struct mobile_image_header {
struct mobile_image_config ImageConfig;
};
-int mobile_edgetpu_firmware_create(struct edgetpu_dev *etdev);
-void mobile_edgetpu_firmware_destroy(struct edgetpu_dev *etdev);
+int edgetpu_mobile_firmware_create(struct edgetpu_dev *etdev);
+void edgetpu_mobile_firmware_destroy(struct edgetpu_dev *etdev);
+
+/*
+ * Assert or release the reset signal of the TPU's CPU
+ * Depending on privilege level, this may be by a direct register write
+ * or a call into GSA.
+ */
+int edgetpu_mobile_firmware_reset_cpu(struct edgetpu_dev *etdev, bool assert_reset);
+
#endif /* __MOBILE_FIRMWARE_H__ */
diff --git a/drivers/edgetpu/mobile-pm.c b/drivers/edgetpu/mobile-pm.c
new file mode 100644
index 0000000..b753231
--- /dev/null
+++ b/drivers/edgetpu/mobile-pm.c
@@ -0,0 +1,755 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common EdgeTPU mobile power management support
+ *
+ * Copyright (C) 2021 Google, Inc.
+ */
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/gsa/gsa_tpu.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <soc/google/bcl.h>
+#include <soc/google/bts.h>
+#include <soc/google/exynos_pm_qos.h>
+
+#include "edgetpu-config.h"
+#include "edgetpu-firmware.h"
+#include "edgetpu-internal.h"
+#include "edgetpu-kci.h"
+#include "edgetpu-mailbox.h"
+#include "edgetpu-mobile-platform.h"
+#include "edgetpu-pm.h"
+#include "mobile-firmware.h"
+#include "mobile-pm.h"
+
+#include "edgetpu-pm.c"
+
+/*
+ * Encode INT/MIF values as a 16 bit pair in the 32-bit return value
+ * (in units of MHz, to provide enough range)
+ */
+#define PM_QOS_INT_SHIFT (16)
+#define PM_QOS_MIF_MASK (0xFFFF)
+#define PM_QOS_FACTOR (1000)
+
+/* Initial power state; overridable at load time via module parameter. */
+static int power_state = TPU_DEFAULT_POWER_STATE;
+
+module_param(power_state, int, 0660);
+
+/* MAX_VOLTAGE_VAL comes from mobile-pm.h (included above); the local
+ * duplicate definition was removed. */
+
+/*
+ * Active (running) power states in ascending performance order;
+ * OD is only available on Abrolhos.
+ */
+enum edgetpu_pwr_state edgetpu_active_states[EDGETPU_NUM_STATES] = {
+	TPU_ACTIVE_UUD,
+	TPU_ACTIVE_SUD,
+	TPU_ACTIVE_UD,
+	TPU_ACTIVE_NOM,
+#if IS_ENABLED(CONFIG_ABROLHOS)
+	TPU_ACTIVE_OD,
+#endif /* IS_ENABLED(CONFIG_ABROLHOS) */
+};
+
+/* States exported through debugfs/sysfs display paths. */
+uint32_t *edgetpu_states_display = edgetpu_active_states;
+
+/*
+ * Enable runtime PM and sync the driver with the block's current ACPM rate.
+ * If the block is already powered, take a runtime PM vote to match.
+ * Returns 0 on success or a negative errno.
+ */
+static int mobile_pwr_state_init(struct device *dev)
+{
+	int ret;
+	int curr_state;
+
+	pm_runtime_enable(dev);
+	curr_state = exynos_acpm_get_rate(TPU_ACPM_DOMAIN, 0);
+
+	if (curr_state > TPU_OFF) {
+		/*
+		 * NOTE(review): pm_runtime_get_sync() can return 1 when the
+		 * device was already active; treated as an error here —
+		 * confirm this is intended.
+		 */
+		ret = pm_runtime_get_sync(dev);
+		if (ret) {
+			pm_runtime_put_noidle(dev);
+			dev_err(dev, "pm_runtime_get_sync returned %d\n", ret);
+			return ret;
+		}
+	}
+
+	ret = exynos_acpm_set_init_freq(TPU_ACPM_DOMAIN, curr_state);
+	if (ret) {
+		dev_err(dev, "error initializing tpu state: %d\n", ret);
+		if (curr_state > TPU_OFF)
+			pm_runtime_put_sync(dev);
+		return ret;
+	}
+
+	return ret;
+}
+
+/* debugfs getter: current TPU core clock rate. */
+static int edgetpu_core_rate_get(void *data, u64 *val)
+{
+	unsigned long req = TPU_DEBUG_REQ | TPU_CLK_CORE_DEBUG;
+
+	*val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN, req);
+	return 0;
+}
+
+/* debugfs setter: request a TPU core clock rate via the ACPM debug channel. */
+static int edgetpu_core_rate_set(void *data, u64 val)
+{
+	return exynos_acpm_set_rate(TPU_ACPM_DOMAIN,
+				    TPU_DEBUG_REQ | TPU_CLK_CORE_DEBUG | val);
+}
+
+/* debugfs getter: current TPU control clock rate. */
+static int edgetpu_ctl_rate_get(void *data, u64 *val)
+{
+	unsigned long req = TPU_DEBUG_REQ | TPU_CLK_CTL_DEBUG;
+
+	*val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN, req);
+	return 0;
+}
+
+/*
+ * debugfs setter: request a TPU control clock rate.
+ *
+ * Fix: the requested value was previously hard-coded to 1000, silently
+ * ignoring the value written by the user; now ORs in @val like the other
+ * rate setters in this file.
+ */
+static int edgetpu_ctl_rate_set(void *data, u64 val)
+{
+	unsigned long dbg_rate_req;
+
+	dbg_rate_req = TPU_DEBUG_REQ | TPU_CLK_CTL_DEBUG;
+	dbg_rate_req |= val;
+
+	return exynos_acpm_set_rate(TPU_ACPM_DOMAIN, dbg_rate_req);
+}
+
+/* debugfs getter: current TPU AXI clock rate. */
+static int edgetpu_axi_rate_get(void *data, u64 *val)
+{
+	unsigned long req = TPU_DEBUG_REQ | TPU_CLK_AXI_DEBUG;
+
+	*val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN, req);
+	return 0;
+}
+
+/*
+ * debugfs setter: request a TPU AXI clock rate.
+ *
+ * Fix: previously ORed a hard-coded 1000 instead of the user-written
+ * @val; now honors @val like the other rate setters in this file.
+ */
+static int edgetpu_axi_rate_set(void *data, u64 val)
+{
+	unsigned long dbg_rate_req;
+
+	dbg_rate_req = TPU_DEBUG_REQ | TPU_CLK_AXI_DEBUG;
+	dbg_rate_req |= val;
+
+	return exynos_acpm_set_rate(TPU_ACPM_DOMAIN, dbg_rate_req);
+}
+
+/* debugfs getter: current TPU APB clock rate (read-only node). */
+static int edgetpu_apb_rate_get(void *data, u64 *val)
+{
+	unsigned long req = TPU_DEBUG_REQ | TPU_CLK_APB_DEBUG;
+
+	*val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN, req);
+	return 0;
+}
+
+/* debugfs getter: current TPU UART clock rate (read-only node). */
+static int edgetpu_uart_rate_get(void *data, u64 *val)
+{
+	unsigned long req = TPU_DEBUG_REQ | TPU_CLK_UART_DEBUG;
+
+	*val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN, req);
+	return 0;
+}
+
+/* debugfs setter: request an INT_M voltage, capped at MAX_VOLTAGE_VAL uV. */
+static int edgetpu_vdd_int_m_set(void *data, u64 val)
+{
+	struct device *dev = data;
+
+	if (val > MAX_VOLTAGE_VAL) {
+		dev_err(dev, "Preventing INT_M voltage > %duV",
+			MAX_VOLTAGE_VAL);
+		return -EINVAL;
+	}
+
+	return exynos_acpm_set_rate(TPU_ACPM_DOMAIN,
+				    TPU_DEBUG_REQ | TPU_VDD_INT_M_DEBUG | val);
+}
+
+/* debugfs getter: current INT_M voltage. */
+static int edgetpu_vdd_int_m_get(void *data, u64 *val)
+{
+	unsigned long req = TPU_DEBUG_REQ | TPU_VDD_INT_M_DEBUG;
+
+	*val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN, req);
+	return 0;
+}
+
+/* debugfs setter: request a VDD_TPU voltage, capped at MAX_VOLTAGE_VAL uV. */
+static int edgetpu_vdd_tpu_set(void *data, u64 val)
+{
+	struct device *dev = data;
+
+	if (val > MAX_VOLTAGE_VAL) {
+		dev_err(dev, "Preventing VDD_TPU voltage > %duV",
+			MAX_VOLTAGE_VAL);
+		return -EINVAL;
+	}
+
+	return exynos_acpm_set_rate(TPU_ACPM_DOMAIN,
+				    TPU_DEBUG_REQ | TPU_VDD_TPU_DEBUG | val);
+}
+
+/* debugfs getter: current VDD_TPU voltage. */
+static int edgetpu_vdd_tpu_get(void *data, u64 *val)
+{
+	unsigned long req = TPU_DEBUG_REQ | TPU_VDD_TPU_DEBUG;
+
+	*val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN, req);
+	return 0;
+}
+
+/*
+ * debugfs setter: request a VDD_TPU_M voltage, capped at MAX_VOLTAGE_VAL uV.
+ *
+ * Fix: the rejection message previously said "VDD_TPU" although this
+ * handler controls VDD_TPU_M.
+ */
+static int edgetpu_vdd_tpu_m_set(void *data, u64 val)
+{
+	struct device *dev = (struct device *)data;
+	unsigned long dbg_rate_req;
+
+	if (val > MAX_VOLTAGE_VAL) {
+		dev_err(dev, "Preventing VDD_TPU_M voltage > %duV",
+			MAX_VOLTAGE_VAL);
+		return -EINVAL;
+	}
+
+	dbg_rate_req = TPU_DEBUG_REQ | TPU_VDD_TPU_M_DEBUG;
+	dbg_rate_req |= val;
+
+	return exynos_acpm_set_rate(TPU_ACPM_DOMAIN, dbg_rate_req);
+}
+
+/* debugfs getter: current VDD_TPU_M voltage. */
+static int edgetpu_vdd_tpu_m_get(void *data, u64 *val)
+{
+	unsigned long req = TPU_DEBUG_REQ | TPU_VDD_TPU_M_DEBUG;
+
+	*val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN, req);
+	return 0;
+}
+
+/*
+ * Transition the TPU block to power state @val; caller holds state_lock.
+ *
+ * Takes a runtime PM vote on an OFF->on transition and releases it on an
+ * on->OFF transition.
+ *
+ * Fix: on acpm_set_rate() failure, only drop the runtime PM vote if one
+ * was actually taken above — previously pm_runtime_put_sync() ran
+ * unconditionally, unbalancing the usage count when the block was
+ * already on.
+ */
+static int mobile_pwr_state_set_locked(struct edgetpu_mobile_platform_dev *etmdev, u64 val)
+{
+	int ret;
+	int curr_state;
+	struct edgetpu_dev *etdev = &etmdev->edgetpu_dev;
+	struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+	struct device *dev = etdev->dev;
+	bool took_rpm_vote = false;
+
+	curr_state = exynos_acpm_get_rate(TPU_ACPM_DOMAIN, 0);
+
+	dev_dbg(dev, "Power state %d -> %llu\n", curr_state, val);
+
+	if (curr_state == TPU_OFF && val > TPU_OFF) {
+		ret = pm_runtime_get_sync(dev);
+		if (ret) {
+			pm_runtime_put_noidle(dev);
+			dev_err(dev, "pm_runtime_get_sync returned %d\n", ret);
+			return ret;
+		}
+		took_rpm_vote = true;
+	}
+
+	ret = platform_pwr->acpm_set_rate(TPU_ACPM_DOMAIN, (unsigned long)val);
+	if (ret) {
+		dev_err(dev, "error setting tpu state: %d\n", ret);
+		if (took_rpm_vote)
+			pm_runtime_put_sync(dev);
+		return ret;
+	}
+
+	if (curr_state != TPU_OFF && val == TPU_OFF) {
+		ret = pm_runtime_put_sync(dev);
+		if (ret) {
+			dev_err(dev, "%s: pm_runtime_put_sync returned %d\n", __func__, ret);
+			return ret;
+		}
+		/* Give the platform a chance to act on block power-down. */
+		if (platform_pwr->block_down)
+			platform_pwr->block_down(etdev);
+	}
+
+	return ret;
+}
+
+/* Read the current TPU power state; caller holds state_lock. */
+static int mobile_pwr_state_get_locked(void *data, u64 *val)
+{
+	struct edgetpu_dev *etdev = data;
+
+	*val = exynos_acpm_get_rate(TPU_ACPM_DOMAIN, 0);
+	dev_dbg(etdev->dev, "current tpu state: %llu\n", *val);
+
+	return 0;
+}
+
+/*
+ * debugfs setter for the requested power state. The state is only applied
+ * when it does not fall below the configured minimum.
+ */
+static int mobile_pwr_state_set(void *data, u64 val)
+{
+	int ret = 0;
+	struct edgetpu_dev *etdev = data;
+	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+	struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+
+	mutex_lock(&platform_pwr->state_lock);
+	platform_pwr->requested_state = val;
+	if (val >= platform_pwr->min_state)
+		ret = mobile_pwr_state_set_locked(etmdev, val);
+	mutex_unlock(&platform_pwr->state_lock);
+
+	return ret;
+}
+
+/* debugfs getter for the current power state (takes state_lock). */
+static int mobile_pwr_state_get(void *data, u64 *val)
+{
+	int ret;
+	struct edgetpu_dev *etdev = data;
+	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+	struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+
+	mutex_lock(&platform_pwr->state_lock);
+	ret = mobile_pwr_state_get_locked(etdev, val);
+	mutex_unlock(&platform_pwr->state_lock);
+
+	return ret;
+}
+
+/*
+ * debugfs setter for the minimum power state floor.
+ *
+ * When the new floor is at or above the last requested state, the floor is
+ * applied immediately.
+ * NOTE(review): lowering the floor does not re-apply the (lower) requested
+ * state until the next request — confirm this is intended.
+ */
+static int mobile_min_pwr_state_set(void *data, u64 val)
+{
+	struct edgetpu_dev *etdev = (typeof(etdev))data;
+	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+	struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+	int ret = 0;
+
+	mutex_lock(&platform_pwr->state_lock);
+	platform_pwr->min_state = val;
+	if (val >= platform_pwr->requested_state)
+		ret = mobile_pwr_state_set_locked(etmdev, val);
+	mutex_unlock(&platform_pwr->state_lock);
+	return ret;
+}
+
+/* debugfs getter for the minimum power state floor. */
+static int mobile_min_pwr_state_get(void *data, u64 *val)
+{
+	struct edgetpu_dev *etdev = data;
+	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+	struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+
+	mutex_lock(&platform_pwr->state_lock);
+	*val = platform_pwr->min_state;
+	mutex_unlock(&platform_pwr->state_lock);
+
+	return 0;
+}
+
+/*
+ * debugfs setter for the ACPM power policy; caches the value on success
+ * so the getter does not need to query ACPM.
+ */
+static int mobile_pwr_policy_set(void *data, u64 val)
+{
+	struct edgetpu_dev *etdev = data;
+	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+	struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+	int ret;
+
+	mutex_lock(&platform_pwr->policy_lock);
+	ret = exynos_acpm_set_policy(TPU_ACPM_DOMAIN, val);
+	if (!ret)
+		platform_pwr->curr_policy = val;
+	else
+		dev_err(etmdev->edgetpu_dev.dev,
+			"unable to set policy %lld (ret %d)\n", val, ret);
+	mutex_unlock(&platform_pwr->policy_lock);
+
+	return ret;
+}
+
+/* debugfs getter for the cached ACPM power policy. */
+static int mobile_pwr_policy_get(void *data, u64 *val)
+{
+	struct edgetpu_dev *etdev = data;
+	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+	struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+
+	mutex_lock(&platform_pwr->policy_lock);
+	*val = platform_pwr->curr_policy;
+	mutex_unlock(&platform_pwr->policy_lock);
+
+	return 0;
+}
+
+/* debugfs file operations for the power nodes created in mobile_pm_after_create(). */
+DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_pwr_policy, mobile_pwr_policy_get, mobile_pwr_policy_set,
+			 "%llu\n");
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_pwr_state, mobile_pwr_state_get, mobile_pwr_state_set, "%llu\n");
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_min_pwr_state, mobile_min_pwr_state_get, mobile_min_pwr_state_set,
+			 "%llu\n");
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_core_rate, edgetpu_core_rate_get,
+			 edgetpu_core_rate_set, "%llu\n");
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_ctl_rate, edgetpu_ctl_rate_get,
+			 edgetpu_ctl_rate_set, "%llu\n");
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_axi_rate, edgetpu_axi_rate_get,
+			 edgetpu_axi_rate_set, "%llu\n");
+
+/* apb/uart rates are read-only: no set handler. */
+DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_apb_rate, edgetpu_apb_rate_get, NULL,
+			 "%llu\n");
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_uart_rate, edgetpu_uart_rate_get, NULL,
+			 "%llu\n");
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_vdd_int_m, edgetpu_vdd_int_m_get,
+			 edgetpu_vdd_int_m_set, "%llu\n");
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_vdd_tpu, edgetpu_vdd_tpu_get,
+			 edgetpu_vdd_tpu_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_vdd_tpu_m, edgetpu_vdd_tpu_m_get,
+			 edgetpu_vdd_tpu_m_set, "%llu\n");
+
+/*
+ * Validate the module-parameter-provided initial power state.
+ * States that would keep the control core from booting, and unrecognized
+ * values, are coerced to TPU_ACTIVE_NOM (the TPU_OFF group intentionally
+ * falls through to the default warning + coercion).
+ */
+static int mobile_get_initial_pwr_state(struct device *dev)
+{
+	switch (power_state) {
+#if IS_ENABLED(CONFIG_ABROLHOS)
+	case TPU_ACTIVE_OD:
+	case TPU_DEEP_SLEEP_CLOCKS_SLOW:
+	case TPU_DEEP_SLEEP_CLOCKS_FAST:
+	case TPU_RETENTION_CLOCKS_SLOW:
+#endif /* IS_ENABLED(CONFIG_ABROLHOS) */
+	case TPU_ACTIVE_UUD:
+	case TPU_ACTIVE_SUD:
+	case TPU_ACTIVE_UD:
+	case TPU_ACTIVE_NOM:
+		dev_info(dev, "Initial power state: %d\n", power_state);
+		break;
+	case TPU_OFF:
+#if IS_ENABLED(CONFIG_ABROLHOS)
+	case TPU_DEEP_SLEEP_CLOCKS_OFF:
+	case TPU_SLEEP_CLOCKS_OFF:
+#endif /* IS_ENABLED(CONFIG_ABROLHOS) */
+		dev_warn(dev, "Power state %d prevents control core booting", power_state);
+		fallthrough;
+	default:
+		dev_warn(dev, "Power state %d is invalid\n", power_state);
+		dev_warn(dev, "defaulting to active nominal\n");
+		power_state = TPU_ACTIVE_NOM;
+		break;
+	}
+	return power_state;
+}
+
+static int mobile_power_down(struct edgetpu_pm *etpm);
+
+/*
+ * PM power-up handler: set the initial power state, re-init the chip and
+ * mailboxes, then (re)start firmware if a firmware interface exists.
+ * On firmware start failure the block is powered back down.
+ */
+static int mobile_power_up(struct edgetpu_pm *etpm)
+{
+	struct edgetpu_dev *etdev = etpm->etdev;
+	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+	struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+	int ret = mobile_pwr_state_set(etpm->etdev, mobile_get_initial_pwr_state(etdev->dev));
+
+	etdev_info(etpm->etdev, "Powering up\n");
+
+	if (ret)
+		return ret;
+
+	/* Optional chip-specific LPM bring-up hook. */
+	if (platform_pwr->lpm_up)
+		platform_pwr->lpm_up(etdev);
+
+	edgetpu_chip_init(etdev);
+
+	if (etdev->kci) {
+		etdev_dbg(etdev, "Resetting KCI\n");
+		edgetpu_kci_reinit(etdev->kci);
+	}
+	if (etdev->mailbox_manager) {
+		etdev_dbg(etdev, "Resetting VII mailboxes\n");
+		edgetpu_mailbox_reset_vii(etdev->mailbox_manager);
+	}
+
+	if (!etdev->firmware)
+		return 0;
+
+	/*
+	 * Why this function uses edgetpu_firmware_*_locked functions without explicitly holding
+	 * edgetpu_firmware_lock:
+	 *
+	 * edgetpu_pm_get() is called in two scenarios - one is when the firmware loading is
+	 * attempting, another one is when the user-space clients need the device be powered
+	 * (usually through acquiring the wakelock).
+	 *
+	 * For the first scenario edgetpu_firmware_is_loading() below shall return true.
+	 * For the second scenario we are indeed called without holding the firmware lock, but the
+	 * firmware loading procedures (i.e. the first scenario) always call edgetpu_pm_get() before
+	 * changing the firmware state, and edgetpu_pm_get() is blocked until this function
+	 * finishes. In short, we are protected by the PM lock.
+	 */
+
+	if (edgetpu_firmware_is_loading(etdev))
+		return 0;
+
+	/* attempt firmware run */
+	switch (edgetpu_firmware_status_locked(etdev)) {
+	case FW_VALID:
+		ret = edgetpu_firmware_restart_locked(etdev, false);
+		break;
+	case FW_INVALID:
+		ret = edgetpu_firmware_run_default_locked(etdev);
+		break;
+	default:
+		break;
+	}
+	if (ret) {
+		mobile_power_down(etpm);
+	} else {
+#if IS_ENABLED(CONFIG_GOOGLE_BCL)
+		/* Lazily acquire the battery current limiter handle. */
+		if (!etmdev->bcl_dev)
+			etmdev->bcl_dev = google_retrieve_bcl_handle();
+		if (etmdev->bcl_dev)
+			google_init_tpu_ratio(etmdev->bcl_dev);
+#endif
+	}
+
+	return ret;
+}
+
+/*
+ * Drop all outstanding BTS performance-scenario votes held by this device.
+ * bts_del_scenario() tracks a reference count internally, so one delete per
+ * activation is required; on error the local count is zeroed to avoid
+ * looping forever.
+ */
+static void mobile_pm_cleanup_bts_scenario(struct edgetpu_dev *etdev)
+{
+	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+	struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+	int performance_scenario = platform_pwr->performance_scenario;
+
+	if (!performance_scenario)
+		return;
+
+	mutex_lock(&platform_pwr->scenario_lock);
+	while (platform_pwr->scenario_count) {
+		int ret = bts_del_scenario(performance_scenario);
+
+		if (ret) {
+			platform_pwr->scenario_count = 0;
+			etdev_warn_once(etdev, "error %d in cleaning up BTS scenario %u\n", ret,
+					performance_scenario);
+			break;
+		}
+		platform_pwr->scenario_count--;
+	}
+	mutex_unlock(&platform_pwr->scenario_lock);
+}
+
+/*
+ * PM power-down handler: shut firmware down (if running), assert the CPU
+ * reset, move the block to TPU_OFF and drop INT/MIF QoS and BTS votes.
+ *
+ * Skipped entirely when the min-state floor pins the device active, or
+ * when the block is already off. Returns -EAGAIN if the CPU reset must be
+ * retried, 0 otherwise.
+ */
+static int mobile_power_down(struct edgetpu_pm *etpm)
+{
+	struct edgetpu_dev *etdev = etpm->etdev;
+	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+	struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+	u64 val;
+	int res = 0;
+	int min_state = platform_pwr->min_state;
+
+	etdev_info(etdev, "Powering down\n");
+
+	if (min_state >= MIN_ACTIVE_STATE) {
+		etdev_info(etdev, "Power down skipped due to min state = %d\n", min_state);
+		return 0;
+	}
+
+	if (mobile_pwr_state_get(etdev, &val)) {
+		etdev_warn(etdev, "Failed to read current power state\n");
+		/* Assume powered so the shutdown sequence below still runs. */
+		val = TPU_ACTIVE_NOM;
+	}
+	if (val == TPU_OFF) {
+		etdev_dbg(etdev, "Device already off, skipping shutdown\n");
+		return 0;
+	}
+
+	if (edgetpu_firmware_status_locked(etdev) == FW_VALID) {
+		etdev_dbg(etdev, "Power down with valid firmware, device state = %d\n",
+			  etdev->state);
+		if (etdev->state == ETDEV_STATE_GOOD) {
+			/* Update usage stats before we power off fw. */
+			edgetpu_kci_update_usage_locked(etdev);
+			platform_pwr->firmware_down(etdev);
+			/* Ensure firmware is completely off */
+			if (platform_pwr->lpm_down)
+				platform_pwr->lpm_down(etdev);
+			/* Indicate firmware is no longer running */
+			etdev->state = ETDEV_STATE_NOFW;
+		}
+		edgetpu_kci_cancel_work_queues(etdev->kci);
+		res = edgetpu_mobile_firmware_reset_cpu(etdev, true);
+		/* TODO(b/198181290): remove -EIO once gsaproxy wakelock is implemented */
+		if (res == -EAGAIN || res == -EIO)
+			return -EAGAIN;
+		if (res < 0)
+			etdev_warn(etdev, "CPU reset request failed (%d)\n", res);
+	}
+
+	mobile_pwr_state_set(etdev, TPU_OFF);
+
+	/* Remove our vote for INT/MIF state (if any) */
+	exynos_pm_qos_update_request(&platform_pwr->int_min, 0);
+	exynos_pm_qos_update_request(&platform_pwr->mif_min, 0);
+
+	mobile_pm_cleanup_bts_scenario(etdev);
+
+	/*
+	 * It should be impossible that power_down() is called when secure_client is set.
+	 * Non-null secure_client implies ext mailbox is acquired, which implies wakelock is
+	 * acquired.
+	 * Clear the state here just in case.
+	 */
+	etmdev->secure_client = NULL;
+
+	return 0;
+}
+
+/*
+ * PM after-create hook: initialize power state, locks, QoS requests, BTS
+ * scenario, debugfs nodes, and finally the chip-specific after_create hook.
+ *
+ * Fixes over the previous version:
+ *  - the exynos_pm_qos requests are now removed on later failure instead
+ *    of being leaked;
+ *  - a debugfs directory creation failure no longer skips the
+ *    chip-specific after_create hook (debugfs remains best-effort).
+ */
+static int mobile_pm_after_create(struct edgetpu_pm *etpm)
+{
+	int ret;
+	struct edgetpu_dev *etdev = etpm->etdev;
+	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+	struct device *dev = etdev->dev;
+	struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+
+	ret = mobile_pwr_state_init(dev);
+	if (ret)
+		return ret;
+
+	mutex_init(&platform_pwr->policy_lock);
+	mutex_init(&platform_pwr->state_lock);
+	mutex_init(&platform_pwr->scenario_lock);
+
+	exynos_pm_qos_add_request(&platform_pwr->int_min, PM_QOS_DEVICE_THROUGHPUT, 0);
+	exynos_pm_qos_add_request(&platform_pwr->mif_min, PM_QOS_BUS_THROUGHPUT, 0);
+
+	platform_pwr->performance_scenario = bts_get_scenindex("tpu_performance");
+	if (!platform_pwr->performance_scenario)
+		dev_warn(etdev->dev, "tpu_performance BTS scenario not found\n");
+	platform_pwr->scenario_count = 0;
+
+	ret = mobile_pwr_state_set(etdev, mobile_get_initial_pwr_state(dev));
+	if (ret)
+		goto err_remove_qos;
+
+	platform_pwr->debugfs_dir = debugfs_create_dir("power", edgetpu_fs_debugfs_dir());
+	if (IS_ERR_OR_NULL(platform_pwr->debugfs_dir)) {
+		/* debugfs is best-effort: warn but don't fail creation */
+		dev_warn(etdev->dev, "Failed to create debug FS power");
+	} else {
+		debugfs_create_file("state", 0660, platform_pwr->debugfs_dir, etdev,
+				    &fops_tpu_pwr_state);
+		debugfs_create_file("min_state", 0660, platform_pwr->debugfs_dir, etdev,
+				    &fops_tpu_min_pwr_state);
+		debugfs_create_file("policy", 0660, platform_pwr->debugfs_dir, etdev,
+				    &fops_tpu_pwr_policy);
+		debugfs_create_file("vdd_tpu", 0660, platform_pwr->debugfs_dir, dev,
+				    &fops_tpu_vdd_tpu);
+		debugfs_create_file("vdd_tpu_m", 0660, platform_pwr->debugfs_dir, dev,
+				    &fops_tpu_vdd_tpu_m);
+		debugfs_create_file("vdd_int_m", 0660, platform_pwr->debugfs_dir, dev,
+				    &fops_tpu_vdd_int_m);
+		debugfs_create_file("core_rate", 0660, platform_pwr->debugfs_dir, dev,
+				    &fops_tpu_core_rate);
+		debugfs_create_file("ctl_rate", 0660, platform_pwr->debugfs_dir, dev,
+				    &fops_tpu_ctl_rate);
+		debugfs_create_file("axi_rate", 0660, platform_pwr->debugfs_dir, dev,
+				    &fops_tpu_axi_rate);
+		debugfs_create_file("apb_rate", 0440, platform_pwr->debugfs_dir, dev,
+				    &fops_tpu_apb_rate);
+		debugfs_create_file("uart_rate", 0440, platform_pwr->debugfs_dir, dev,
+				    &fops_tpu_uart_rate);
+	}
+
+	if (platform_pwr->after_create) {
+		ret = platform_pwr->after_create(etdev);
+		if (ret)
+			goto err_remove_debugfs;
+	}
+
+	return 0;
+
+err_remove_debugfs:
+	if (!IS_ERR_OR_NULL(platform_pwr->debugfs_dir))
+		debugfs_remove_recursive(platform_pwr->debugfs_dir);
+err_remove_qos:
+	exynos_pm_qos_remove_request(&platform_pwr->int_min);
+	exynos_pm_qos_remove_request(&platform_pwr->mif_min);
+	return ret;
+}
+
+/*
+ * PM before-destroy hook: run the chip hook first, then tear down debugfs,
+ * runtime PM, BTS votes and the QoS requests added in after_create().
+ */
+static void mobile_pm_before_destroy(struct edgetpu_pm *etpm)
+{
+	struct edgetpu_dev *etdev = etpm->etdev;
+	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+	struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+
+	if (platform_pwr->before_destroy)
+		platform_pwr->before_destroy(etdev);
+
+	debugfs_remove_recursive(platform_pwr->debugfs_dir);
+	pm_runtime_disable(etpm->etdev->dev);
+	mobile_pm_cleanup_bts_scenario(etdev);
+	exynos_pm_qos_remove_request(&platform_pwr->int_min);
+	exynos_pm_qos_remove_request(&platform_pwr->mif_min);
+}
+
+/* PM callbacks registered with the generic edgetpu PM core. */
+static struct edgetpu_pm_handlers mobile_pm_handlers = {
+	.after_create = mobile_pm_after_create,
+	.before_destroy = mobile_pm_before_destroy,
+	.power_up = mobile_power_up,
+	.power_down = mobile_power_down,
+};
+
+/* Register the mobile PM handlers; see mobile-pm.h for the contract. */
+int mobile_pm_create(struct edgetpu_dev *etdev)
+{
+	return edgetpu_pm_create(etdev, &mobile_pm_handlers);
+}
+
+/* Counterpart of mobile_pm_create(). */
+void mobile_pm_destroy(struct edgetpu_dev *etdev)
+{
+	edgetpu_pm_destroy(etdev);
+}
+
+/*
+ * Apply an INT/MIF QoS vote requested by firmware. The 32-bit payload packs
+ * INT (upper 16 bits) and MIF (lower 16 bits) frequencies in MHz; they are
+ * scaled by PM_QOS_FACTOR before being handed to exynos_pm_qos.
+ */
+void mobile_pm_set_pm_qos(struct edgetpu_dev *etdev, u32 pm_qos_val)
+{
+	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+	struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+	s32 int_freq = (pm_qos_val >> PM_QOS_INT_SHIFT) * PM_QOS_FACTOR;
+	s32 mif_freq = (pm_qos_val & PM_QOS_MIF_MASK) * PM_QOS_FACTOR;
+
+	etdev_dbg(etdev, "%s: pm_qos request - int = %d mif = %d\n", __func__, int_freq, mif_freq);
+
+	exynos_pm_qos_update_request(&platform_pwr->int_min, int_freq);
+	exynos_pm_qos_update_request(&platform_pwr->mif_min, mif_freq);
+}
+
+/* Take one BTS performance-scenario vote (no-op if scenario missing). */
+static void mobile_pm_activate_bts_scenario(struct edgetpu_dev *etdev)
+{
+	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+	struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+	int performance_scenario = platform_pwr->performance_scenario;
+
+	/* bts_add_scenario() keeps track of reference count internally.*/
+	int ret;
+
+	if (!performance_scenario)
+		return;
+	mutex_lock(&platform_pwr->scenario_lock);
+	ret = bts_add_scenario(performance_scenario);
+	if (ret)
+		etdev_warn_once(etdev, "error %d adding BTS scenario %u\n", ret,
+				performance_scenario);
+	else
+		platform_pwr->scenario_count++;
+
+	etdev_dbg(etdev, "BTS Scenario activated: %d\n", platform_pwr->scenario_count);
+	mutex_unlock(&platform_pwr->scenario_lock);
+}
+
+/* Drop one BTS performance-scenario vote; warns on an unbalanced release. */
+static void mobile_pm_deactivate_bts_scenario(struct edgetpu_dev *etdev)
+{
+	/* bts_del_scenario() keeps track of reference count internally.*/
+	int ret;
+	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+	struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+	int performance_scenario = platform_pwr->performance_scenario;
+
+	if (!performance_scenario)
+		return;
+	mutex_lock(&platform_pwr->scenario_lock);
+	if (!platform_pwr->scenario_count) {
+		etdev_warn(etdev, "Unbalanced bts deactivate\n");
+		mutex_unlock(&platform_pwr->scenario_lock);
+		return;
+	}
+	ret = bts_del_scenario(performance_scenario);
+	if (ret)
+		etdev_warn_once(etdev, "error %d deleting BTS scenario %u\n", ret,
+				performance_scenario);
+	else
+		platform_pwr->scenario_count--;
+
+	etdev_dbg(etdev, "BTS Scenario deactivated: %d\n", platform_pwr->scenario_count);
+	mutex_unlock(&platform_pwr->scenario_lock);
+}
+
+/*
+ * Handle a BTS vote request from firmware: 1 activates the performance
+ * scenario, 0 deactivates it; anything else is rejected with a warning.
+ */
+void mobile_pm_set_bts(struct edgetpu_dev *etdev, u32 bts_val)
+{
+	etdev_dbg(etdev, "%s: bts request - val = %u\n", __func__, bts_val);
+
+	if (bts_val == 0)
+		mobile_pm_deactivate_bts_scenario(etdev);
+	else if (bts_val == 1)
+		mobile_pm_activate_bts_scenario(etdev);
+	else
+		etdev_warn(etdev, "%s: invalid BTS request value: %u\n", __func__, bts_val);
+}
diff --git a/drivers/edgetpu/mobile-pm.h b/drivers/edgetpu/mobile-pm.h
new file mode 100644
index 0000000..0cb4b77
--- /dev/null
+++ b/drivers/edgetpu/mobile-pm.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Power management header for mobile chipsets.
+ *
+ * Copyright (C) 2021 Google, Inc.
+ */
+
+#ifndef __MOBILE_PM_H__
+#define __MOBILE_PM_H__
+
+#include "edgetpu-internal.h"
+#include "edgetpu-kci.h"
+
+/* Can't build out of tree with acpm_dvfs unless kernel supports ACPM */
+#if IS_ENABLED(CONFIG_ACPM_DVFS) || IS_ENABLED(CONFIG_EDGETPU_TEST)
+
+#include <linux/acpm_dvfs.h>
+
+#else
+
+/*
+ * Stubs used when ACPM is unavailable: set_rate stores the last requested
+ * rate so get_rate can echo it back; everything else succeeds trivially.
+ * NOTE(review): as a `static` in a header, each including TU gets its own
+ * copy of exynos_acpm_rate — acceptable for a test stub, but confirm.
+ */
+static unsigned long exynos_acpm_rate;
+static inline int exynos_acpm_set_rate(unsigned int id, unsigned long rate)
+{
+	exynos_acpm_rate = rate;
+	return 0;
+}
+static inline int exynos_acpm_set_init_freq(unsigned int dfs_id,
+					    unsigned long freq)
+{
+	return 0;
+}
+static inline unsigned long exynos_acpm_get_rate(unsigned int id,
+						 unsigned long dbg_val)
+{
+	return exynos_acpm_rate;
+}
+static inline int exynos_acpm_set_policy(unsigned int id, unsigned long policy)
+{
+	return 0;
+}
+#endif /* IS_ENABLED(CONFIG_ACPM_DVFS) || IS_ENABLED(CONFIG_EDGETPU_TEST) */
+
+/*
+ * Request codes from firmware
+ * Values must match with firmware code base
+ */
+enum mobile_reverse_kci_code {
+	RKCI_CODE_PM_QOS = RKCI_CHIP_CODE_FIRST + 1,	/* INT/MIF QoS vote; see mobile_pm_set_pm_qos() */
+	RKCI_CODE_BTS = RKCI_CHIP_CODE_FIRST + 2,	/* BTS scenario vote; see mobile_pm_set_bts() */
+};
+
+#define MAX_VOLTAGE_VAL 1250000
+#define TPU_DEBUG_REQ (1 << 31)
+#define TPU_VDD_TPU_DEBUG (0 << 27)
+#define TPU_VDD_TPU_M_DEBUG (1 << 27)
+#define TPU_VDD_INT_M_DEBUG (2 << 27)
+#define TPU_CLK_CORE_DEBUG (3 << 27)
+#define TPU_CLK_CTL_DEBUG (4 << 27)
+#define TPU_CLK_AXI_DEBUG (5 << 27)
+#define TPU_CLK_APB_DEBUG (6 << 27)
+#define TPU_CLK_UART_DEBUG (7 << 27)
+#define TPU_CORE_PWR_DEBUG (8 << 27)
+#define TPU_DEBUG_VALUE_MASK ((1 << 27) - 1)
+
+/*
+ * Initialize a power management interface for an edgetpu device on mobile
+ * chipsets.
+ * Needs to be called after the devices's platform_pwr struct has been initialized.
+ */
+int mobile_pm_create(struct edgetpu_dev *etdev);
+
+/*
+ * Wrapper for chip-specific implementation.
+ * Typically calls mobile_pm_create after initializing the platform_pwr struct.
+ */
+int edgetpu_chip_pm_create(struct edgetpu_dev *etdev);
+
+/*
+ * Destroy power management interface for an edgetpu device on mobile chipsets.
+ */
+void mobile_pm_destroy(struct edgetpu_dev *etdev);
+
+/* Set required QoS value for the edgetpu device. */
+void mobile_pm_set_pm_qos(struct edgetpu_dev *etdev, u32 pm_qos_val);
+
+/* Set BTS value for the edgetpu device. */
+void mobile_pm_set_bts(struct edgetpu_dev *etdev, u32 bts_val);
+
+#endif /* __MOBILE_PM_H__ */
diff --git a/drivers/edgetpu/mobile-thermal.c b/drivers/edgetpu/mobile-thermal.c
new file mode 100644
index 0000000..c1b1f69
--- /dev/null
+++ b/drivers/edgetpu/mobile-thermal.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common EdgeTPU mobile thermal management support
+ *
+ * Copyright (C) 2021 Google, Inc.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include <linux/version.h>
+#include <soc/google/gs101_tmu.h>
+
+#include "edgetpu-config.h"
+#include "edgetpu-internal.h"
+#include "edgetpu-kci.h"
+#include "edgetpu-mmu.h"
+#include "edgetpu-pm.h"
+#include "edgetpu-thermal.h"
+#include "mobile-pm.h"
+
+#define MAX_NUM_TPU_STATES 10
+/* Scratch capacity for DT parsing; assumes at most 2 columns per state row. */
+#define OF_DATA_NUM_MAX (MAX_NUM_TPU_STATES * 2)
+/* DVFS state -> power table parsed from the device tree, ordered hottest
+ * (highest power) first; shared by all cooling-device callbacks. */
+static struct edgetpu_state_pwr state_pwr_map[MAX_NUM_TPU_STATES] = {0};
+
+/*
+ * Sends the thermal throttling KCI if the device is powered.
+ *
+ * Returns the return value of KCI if the device is powered, otherwise 0.
+ */
+static int edgetpu_thermal_kci_if_powered(struct edgetpu_dev *etdev, enum edgetpu_pwr_state state)
+{
+	int ret = 0;
+
+	/* Hold a PM reference across the KCI so the device can't power down mid-request. */
+	if (edgetpu_pm_get_if_powered(etdev->pm)) {
+		ret = edgetpu_kci_notify_throttling(etdev, state);
+		if (ret)
+			etdev_err_ratelimited(etdev,
+					      "Failed to notify FW about power state %u, error:%d",
+					      state, ret);
+		edgetpu_pm_put(etdev->pm);
+	}
+	/* Device not powered: nothing to notify, report success. */
+	return ret;
+}
+
+/* Reports the deepest cooling state (tpu_num_states - 1) to the thermal core. */
+static int edgetpu_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+	struct edgetpu_thermal *thermal = cdev->devdata;
+
+	/* An empty DVFS table means there are no cooling states to offer. */
+	if (thermal->tpu_num_states > 0) {
+		*state = thermal->tpu_num_states - 1;
+		return 0;
+	}
+	return -ENODEV;
+}
+
+/*
+ * Set cooling state.
+ *
+ * The effective state is the stronger (numerically larger) of the thermal
+ * framework's request and the userspace sysfs vote. Returns -EALREADY if the
+ * requested state is already in effect.
+ */
+static int edgetpu_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state_original)
+{
+	int ret;
+	struct edgetpu_thermal *cooling = cdev->devdata;
+	struct device *dev = cooling->dev;
+	unsigned long pwr_state;
+
+	if (state_original >= cooling->tpu_num_states) {
+		dev_err(dev, "%s: invalid cooling state %lu\n", __func__, state_original);
+		return -EINVAL;
+	}
+
+	/*
+	 * NOTE(review): sysfs_req is written in user_vote_store() under
+	 * cdev->lock, not cooling->lock — confirm the thermal core serializes
+	 * these paths.
+	 */
+	state_original = max(cooling->sysfs_req, state_original);
+
+	mutex_lock(&cooling->lock);
+	pwr_state = state_pwr_map[state_original].state;
+	if (state_original == cooling->cooling_state) {
+		ret = -EALREADY;
+		goto out;
+	}
+
+	/*
+	 * Set the thermal policy through ACPM to allow cooling by DVFS. Any states lower
+	 * than UUD should be handled by firmware when it gets the throttling notification
+	 * KCI
+	 */
+	if (pwr_state < TPU_ACTIVE_UUD) {
+		dev_warn_ratelimited(dev,
+				     "Setting lowest DVFS state, waiting for FW to shutdown TPU");
+		ret = exynos_acpm_set_policy(TPU_ACPM_DOMAIN, TPU_ACTIVE_UUD);
+	} else {
+		ret = exynos_acpm_set_policy(TPU_ACPM_DOMAIN, pwr_state);
+	}
+
+	if (ret) {
+		dev_err(dev, "error setting tpu policy: %d\n", ret);
+		goto out;
+	}
+	/* Only record the new state after ACPM accepted the policy. */
+	cooling->cooling_state = state_original;
+out:
+	mutex_unlock(&cooling->lock);
+	return ret;
+}
+
+/*
+ * Reports the current cooling state. If the cached state is somehow out of
+ * range, resets the DVFS policy to the chip's nominal active state and
+ * reports "no cooling".
+ */
+static int edgetpu_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state)
+{
+	int ret = 0;
+	struct edgetpu_thermal *cooling = cdev->devdata;
+
+	*state = cooling->cooling_state;
+	if (*state < cooling->tpu_num_states)
+		return 0;
+
+	/* Out-of-range cached state: recover by restoring the default policy. */
+	dev_warn(cooling->dev, "Unknown cooling state: %lu, resetting\n", *state);
+	mutex_lock(&cooling->lock);
+
+#if IS_ENABLED(CONFIG_ABROLHOS)
+	ret = exynos_acpm_set_policy(TPU_ACPM_DOMAIN, TPU_ACTIVE_OD);
+#else
+	ret = exynos_acpm_set_policy(TPU_ACPM_DOMAIN, TPU_ACTIVE_NOM);
+#endif /* IS_ENABLED(CONFIG_ABROLHOS) */
+	if (ret) {
+		dev_err(cooling->dev, "error setting tpu policy: %d\n", ret);
+		mutex_unlock(&cooling->lock);
+		return ret;
+	}
+
+	/* setting back to "no cooling" */
+	cooling->cooling_state = 0;
+	mutex_unlock(&cooling->lock);
+
+	return 0;
+}
+
+/*
+ * Looks up the power cost (mW) of @state in the DVFS table.
+ * Returns 0 on a match; -EINVAL (and *power = 0) for an unknown state.
+ */
+static int edgetpu_state2power_internal(unsigned long state, u32 *power,
+					struct edgetpu_thermal *thermal)
+{
+	int idx = 0;
+
+	/* Linear scan of the parsed DVFS table for the matching state entry. */
+	while (idx < thermal->tpu_num_states) {
+		if (state_pwr_map[idx].state == state) {
+			*power = state_pwr_map[idx].power;
+			return 0;
+		}
+		++idx;
+	}
+
+	dev_err(thermal->dev, "Unknown state req for: %lu\n", state);
+	*power = 0;
+	return -EINVAL;
+}
+
+/*
+ * Power-allocator hook: reports the power (mW) implied by the TPU's current
+ * DVFS rate as read from ACPM. The extra thermal_zone_device parameter was
+ * dropped from this callback in kernel 5.10, hence the version guard.
+ */
+static int edgetpu_get_requested_power(struct thermal_cooling_device *cdev,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
+				       struct thermal_zone_device *tz,
+#endif
+				       u32 *power)
+{
+	unsigned long state_original;
+	struct edgetpu_thermal *cooling = cdev->devdata;
+
+	state_original = exynos_acpm_get_rate(TPU_ACPM_DOMAIN, 0);
+	return edgetpu_state2power_internal(state_original, power, cooling);
+}
+
+/*
+ * Power-allocator hook: converts a cooling state index into its power cost
+ * (mW) via the DVFS table. Rejects out-of-range states with -EINVAL.
+ */
+static int edgetpu_state2power(struct thermal_cooling_device *cdev,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
+			       struct thermal_zone_device *tz,
+#endif
+			       unsigned long state, u32 *power)
+{
+	struct edgetpu_thermal *cooling = cdev->devdata;
+
+	if (state >= cooling->tpu_num_states) {
+		dev_err(cooling->dev, "%s: invalid state: %lu\n", __func__, state);
+		return -EINVAL;
+	}
+
+	return edgetpu_state2power_internal(state_pwr_map[state].state, power, cooling);
+}
+
+/*
+ * Power-allocator hook: converts a power budget (mW) into the shallowest
+ * cooling state whose consumption fits within it.
+ */
+static int edgetpu_power2state(struct thermal_cooling_device *cdev,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0)
+			       struct thermal_zone_device *tz,
+#endif
+			       u32 power, unsigned long *state)
+{
+	int i, penultimate_throttle_state;
+	struct edgetpu_thermal *thermal = cdev->devdata;
+
+	/* Default to state 0 ("no cooling") when the budget covers every state. */
+	*state = 0;
+	if (thermal->tpu_num_states < 2)
+		return thermal->tpu_num_states == 1 ? 0 : -ENODEV;
+
+	penultimate_throttle_state = thermal->tpu_num_states - 2;
+	/*
+	 * argument "power" is the maximum allowed power consumption in mW as
+	 * defined by the PID control loop. Check for the first state that is
+	 * less than or equal to the current allowed power. state_pwr_map is
+	 * descending, so lowest power consumption is last value in the array
+	 * return lowest state even if it consumes more power than allowed as
+	 * not all platforms can handle throttling below an active state
+	 */
+	for (i = penultimate_throttle_state; i >= 0; --i) {
+		if (power < state_pwr_map[i].power) {
+			*state = i + 1;
+			break;
+		}
+	}
+	return 0;
+}
+
+/*
+ * Cooling-device callbacks registered with the kernel thermal framework,
+ * including the power-allocator hooks (get_requested_power / state2power /
+ * power2state).
+ */
+static struct thermal_cooling_device_ops edgetpu_cooling_ops = {
+	.get_max_state = edgetpu_get_max_state,
+	.get_cur_state = edgetpu_get_cur_state,
+	.set_cur_state = edgetpu_set_cur_state,
+	.get_requested_power = edgetpu_get_requested_power,
+	.state2power = edgetpu_state2power,
+	.power2state = edgetpu_power2state,
+};
+
+/* Unregisters the cooling device if it was successfully registered. */
+static void tpu_thermal_exit_cooling(struct edgetpu_thermal *thermal)
+{
+	if (!IS_ERR_OR_NULL(thermal->cdev))
+		thermal_cooling_device_unregister(thermal->cdev);
+}
+
+/* Tears down the cooling device and its debugfs directory. */
+static void tpu_thermal_exit(struct edgetpu_thermal *thermal)
+{
+	tpu_thermal_exit_cooling(thermal);
+	debugfs_remove_recursive(thermal->cooling_root);
+}
+
+/* devres release callback: runs on device detach to undo tpu_thermal_init(). */
+static void devm_tpu_thermal_release(struct device *dev, void *res)
+{
+	struct edgetpu_thermal *thermal = res;
+
+	tpu_thermal_exit(thermal);
+}
+
+/*
+ * Parses the "tpu_dvfs_table_size" / "tpu_dvfs_table" device-tree properties
+ * into state_pwr_map and sets thermal->tpu_num_states.
+ *
+ * The table is row_size rows of col_size u32s each; column 0 is the DVFS
+ * state value and column 1 its power cost in mW. Returns 0 on success,
+ * -EINVAL for a missing, malformed, or oversized table.
+ */
+static int tpu_thermal_parse_dvfs_table(struct edgetpu_thermal *thermal)
+{
+	int row_size, col_size, tbl_size, i;
+	int of_data_int_array[OF_DATA_NUM_MAX];
+
+	if (of_property_read_u32_array(thermal->dev->of_node, "tpu_dvfs_table_size",
+				       of_data_int_array, 2))
+		goto error;
+
+	row_size = of_data_int_array[0];
+	col_size = of_data_int_array[1];
+	if (row_size > MAX_NUM_TPU_STATES) {
+		dev_err(thermal->dev, "too many TPU states\n");
+		goto error;
+	}
+	/*
+	 * Each row must carry at least a (state, power) pair, and the bounds
+	 * must be validated BEFORE the multiplication so tbl_size can neither
+	 * overflow nor exceed the scratch array.
+	 */
+	if (row_size <= 0 || col_size < 2 || col_size > OF_DATA_NUM_MAX / row_size)
+		goto error;
+	tbl_size = row_size * col_size;
+
+	if (of_property_read_u32_array(thermal->dev->of_node, "tpu_dvfs_table", of_data_int_array,
+				       tbl_size))
+		goto error;
+
+	thermal->tpu_num_states = row_size;
+	for (i = 0; i < row_size; ++i) {
+		int idx = col_size * i;
+
+		state_pwr_map[i].state = of_data_int_array[idx];
+		state_pwr_map[i].power = of_data_int_array[idx + 1];
+	}
+
+	return 0;
+
+error:
+	dev_err(thermal->dev, "failed to parse DVFS table\n");
+	return -EINVAL;
+}
+
+/* sysfs "user_vote" read: shows the userspace-requested minimum cooling state. */
+static ssize_t user_vote_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct thermal_cooling_device *cdev =
+		container_of(dev, struct thermal_cooling_device, device);
+	struct edgetpu_thermal *cooling = cdev->devdata;
+
+	return cooling ? sysfs_emit(buf, "%lu\n", cooling->sysfs_req) : -ENODEV;
+}
+
+/*
+ * sysfs "user_vote" write: records a userspace floor on the cooling state and
+ * kicks the thermal core so edgetpu_set_cur_state() re-evaluates.
+ */
+static ssize_t user_vote_store(struct device *dev, struct device_attribute *attr, const char *buf,
+			       size_t count)
+{
+	struct thermal_cooling_device *cdev =
+		container_of(dev, struct thermal_cooling_device, device);
+	struct edgetpu_thermal *cooling = cdev->devdata;
+	int ret;
+	unsigned long state;
+
+	if (!cooling)
+		return -ENODEV;
+
+	ret = kstrtoul(buf, 0, &state);
+	if (ret)
+		return ret;
+
+	if (state >= cooling->tpu_num_states)
+		return -EINVAL;
+
+	/*
+	 * NOTE(review): sysfs_req is written under cdev->lock, while
+	 * edgetpu_set_cur_state() reads it outside cooling->lock — confirm
+	 * the thermal core serializes these paths.
+	 */
+	mutex_lock(&cdev->lock);
+	cooling->sysfs_req = state;
+	cdev->updated = false;
+	mutex_unlock(&cdev->lock);
+
+	/* thermal_cdev_update() became private to the thermal core in 5.12. */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 12, 0)
+	thermal_cdev_update(cdev);
+#elif IS_ENABLED(CONFIG_THERMAL)
+	dev_err(dev, "Thermal update not implemented");
+#endif
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(user_vote);
+
+/* Thermal-pause notifier: dispatches suspend/resume requests to the TPU. */
+static int tpu_pause_callback(enum thermal_pause_state action, void *dev)
+{
+	if (!dev)
+		return -EINVAL;
+
+	switch (action) {
+	case THERMAL_SUSPEND:
+		return edgetpu_thermal_suspend(dev);
+	case THERMAL_RESUME:
+		return edgetpu_thermal_resume(dev);
+	default:
+		return -EINVAL;
+	}
+}
+
+/*
+ * Parses the DVFS table and registers the TPU cooling device (plus its
+ * "user_vote" sysfs attribute) with the thermal framework.
+ */
+static int tpu_thermal_cooling_register(struct edgetpu_thermal *thermal, char *type)
+{
+	struct device_node *cooling_node = NULL;
+	int err = 0;
+
+	thermal->op_data = NULL;
+	thermal->tpu_num_states = 0;
+
+	err = tpu_thermal_parse_dvfs_table(thermal);
+	if (err)
+		return err;
+
+	mutex_init(&thermal->lock);
+	cooling_node = of_find_node_by_name(NULL, "tpu-cooling");
+	if (!cooling_node)
+		dev_warn(thermal->dev, "failed to find cooling node\n");
+	/* Initialize the cooling state as 0, means "no cooling" */
+	thermal->cooling_state = 0;
+	thermal->cdev = thermal_of_cooling_device_register(cooling_node, type, thermal,
+							   &edgetpu_cooling_ops);
+	/* of_find_node_by_name() takes a reference; drop it (no-op on NULL). */
+	of_node_put(cooling_node);
+	if (IS_ERR(thermal->cdev))
+		return PTR_ERR(thermal->cdev);
+
+	return device_create_file(&thermal->cdev->device, &dev_attr_user_vote);
+}
+
+/*
+ * Initializes the thermal object: creates the debugfs "cooling" directory,
+ * registers the cooling device, and hooks the thermal pause callback.
+ */
+static int tpu_thermal_init(struct edgetpu_thermal *thermal, struct device *dev)
+{
+	int err;
+	struct dentry *d;
+
+	thermal->dev = dev;
+	d = debugfs_create_dir("cooling", edgetpu_fs_debugfs_dir());
+	/* don't let debugfs creation failure abort the init procedure */
+	if (IS_ERR_OR_NULL(d))
+		dev_warn(dev, "failed to create debug fs for cooling");
+	thermal->cooling_root = d;
+
+	err = tpu_thermal_cooling_register(thermal, EDGETPU_COOLING_NAME);
+	if (err) {
+		dev_err(dev, "failed to initialize external cooling\n");
+		tpu_thermal_exit(thermal);
+		return err;
+	}
+
+	register_tpu_thermal_pause_cb(tpu_pause_callback, dev);
+
+	return 0;
+}
+
+/*
+ * Allocates and initializes the thermal object as a device-managed resource:
+ * it is torn down automatically (devm_tpu_thermal_release) when @dev detaches.
+ *
+ * Returns the thermal object, or an ERR_PTR on allocation/init failure.
+ */
+struct edgetpu_thermal *devm_tpu_thermal_create(struct device *dev, struct edgetpu_dev *etdev)
+{
+	struct edgetpu_thermal *thermal;
+	int err;
+
+	thermal = devres_alloc(devm_tpu_thermal_release, sizeof(*thermal), GFP_KERNEL);
+	if (!thermal)
+		return ERR_PTR(-ENOMEM);
+
+	thermal->etdev = etdev;
+	err = tpu_thermal_init(thermal, dev);
+	if (err) {
+		/* init already cleaned up after itself; just drop the devres. */
+		devres_free(thermal);
+		return ERR_PTR(err);
+	}
+
+	devres_add(dev, thermal);
+	return thermal;
+}
+
+/*
+ * Thermal suspend handler: marks the TPU thermally suspended and, if powered,
+ * tells the firmware to go to TPU_OFF.
+ */
+int edgetpu_thermal_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct edgetpu_dev *etdev = platform_get_drvdata(pdev);
+	struct edgetpu_thermal *cooling = etdev->thermal;
+	int ret = 0;
+
+	if (IS_ERR(cooling))
+		return PTR_ERR(cooling);
+	mutex_lock(&cooling->lock);
+	/*
+	 * Always set as suspended even when the FW cannot handle the KCI (it's dead for some
+	 * unknown reasons) because we still want to prevent the runtime from using TPU.
+	 */
+	cooling->thermal_suspended = true;
+	ret = edgetpu_thermal_kci_if_powered(etdev, TPU_OFF);
+	mutex_unlock(&cooling->lock);
+	return ret;
+}
+
+/*
+ * Thermal resume handler: notifies firmware of the top (hottest/highest power)
+ * DVFS state and clears the suspended flag only if the notification succeeded.
+ */
+int edgetpu_thermal_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct edgetpu_dev *etdev = platform_get_drvdata(pdev);
+	struct edgetpu_thermal *cooling = etdev->thermal;
+	int ret = 0;
+
+	if (IS_ERR(cooling))
+		return PTR_ERR(cooling);
+	mutex_lock(&cooling->lock);
+	ret = edgetpu_thermal_kci_if_powered(etdev, state_pwr_map[0].state);
+	/*
+	 * Unlike edgetpu_thermal_suspend(), only set the device is resumed if the FW handled the
+	 * KCI request.
+	 */
+	if (!ret)
+		cooling->thermal_suspended = false;
+	mutex_unlock(&cooling->lock);
+	return ret;
+}