author    Zuma copybara merger <zuma-automerger@google.com>    2022-10-12 16:33:08 +0800
committer Copybara-Service <copybara-worker@google.com>        2022-10-21 00:12:13 -0700
commit    c99afef64f5492b8fa1a393471a7a7195f7952fa (patch)
tree      6c2ed6cf1ef11c351ffb8ece9b388ac1f5d3099a
parent    ff26cd724d3fff0bf9d9af41dca9adb59e5a99dd (diff)
download  rio-c99afef64f5492b8fa1a393471a7a7195f7952fa.tar.gz
[Copybara Auto Merge] Merge branch zuma into android13-gs-pixel-5.15
edgetpu: Read TPU core frequency from CMU
Bug: 245094308
edgetpu: Update edgetpu_soc_pm_{set_rate,set_init_freq}
Bug: 245094308 (repeat)
edgetpu: Only set the is_block_off if PMU is present
Bug: 245094308 (repeat)
gcip: Use term awaiter instead of async_resp
Bug: 249642792
gcip: Use term awaiter instead of async_resp
Bug: 249642792 (repeat)
Signed-off-by: Zuma copybara merger <zuma-automerger@google.com>
GitOrigin-RevId: d20eabdcd5e116924fef05fa0f242595fa27065d
Change-Id: I807ced1703263a6fd4ae500ff4dcd240c55dfd16
-rw-r--r--  drivers/edgetpu/edgetpu-mobile-platform.c                        12
-rw-r--r--  drivers/edgetpu/edgetpu-soc.h                                     5
-rw-r--r--  drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c  109
-rw-r--r--  drivers/edgetpu/gcip-kernel-driver/include/gcip/gcip-mailbox.h   78
-rw-r--r--  drivers/edgetpu/mobile-pm.c                                      99
-rw-r--r--  drivers/edgetpu/mobile-soc-gsx01.c                              168
-rw-r--r--  drivers/edgetpu/mobile-soc-gsx01.h                                2
-rw-r--r--  drivers/edgetpu/mobile-thermal.c                                  2
-rw-r--r--  drivers/edgetpu/rio-pm.c                                          9
9 files changed, 268 insertions, 216 deletions
diff --git a/drivers/edgetpu/edgetpu-mobile-platform.c b/drivers/edgetpu/edgetpu-mobile-platform.c
index 30bb6f1..f9fe1fa 100644
--- a/drivers/edgetpu/edgetpu-mobile-platform.c
+++ b/drivers/edgetpu/edgetpu-mobile-platform.c
@@ -231,7 +231,9 @@ static void edgetpu_platform_parse_pmu(struct edgetpu_mobile_platform_dev *etmde
!of_property_read_u32_index(dev->of_node, "pmu-status-base", 0, &reg)) {
etmdev->pmu_status = devm_ioremap(dev, reg, 0x4);
if (!etmdev->pmu_status)
- etdev_info(etdev, "Using ACPM for blk status query\n");
+ etdev_err(etdev, "Using ACPM for blk status query\n");
+ } else {
+ etdev_warn(etdev, "Failed to find PMU register base\n");
}
}
@@ -363,6 +365,12 @@ static int edgetpu_mobile_platform_probe(struct platform_device *pdev,
goto out_cleanup_fw;
}
+ /*
+ * Parses PMU before edgetpu_device_add so edgetpu_chip_pm_create can know whether to set
+ * the is_block_down op.
+ */
+ edgetpu_platform_parse_pmu(etmdev);
+
ret = edgetpu_device_add(etdev, &regs, iface_params, ARRAY_SIZE(iface_params));
if (ret) {
dev_err(dev, "edgetpu setup failed: %d", ret);
@@ -375,8 +383,6 @@ static int edgetpu_mobile_platform_probe(struct platform_device *pdev,
goto out_remove_device;
}
- edgetpu_platform_parse_pmu(etmdev);
-
etmdev->log_mem = devm_kcalloc(dev, etdev->num_cores, sizeof(*etmdev->log_mem), GFP_KERNEL);
if (!etmdev->log_mem) {
ret = -ENOMEM;
diff --git a/drivers/edgetpu/edgetpu-soc.h b/drivers/edgetpu/edgetpu-soc.h
index bba617a..53d06d2 100644
--- a/drivers/edgetpu/edgetpu-soc.h
+++ b/drivers/edgetpu/edgetpu-soc.h
@@ -27,14 +27,11 @@ int edgetpu_soc_prepare_firmware(struct edgetpu_dev *etdev);
* @flags can be used by platform-specific code to pass additional flags to the SoC
* handler; for calls from generic code this value must be zero.
*/
-long edgetpu_soc_pm_get_rate(int flags);
+long edgetpu_soc_pm_get_rate(struct edgetpu_dev *etdev, int flags);
/* Power management set TPU clock rate */
int edgetpu_soc_pm_set_rate(unsigned long rate);
-/* Set initial TPU freq */
-int edgetpu_soc_pm_set_init_freq(unsigned long freq);
-
/* Set PM policy */
int edgetpu_soc_pm_set_policy(u64 val);
diff --git a/drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c b/drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
index 305407d..fc89d4d 100644
--- a/drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
+++ b/drivers/edgetpu/gcip-kernel-driver/drivers/gcip/gcip-mailbox.c
@@ -43,7 +43,7 @@
struct gcip_mailbox_wait_list_elem {
struct list_head list;
void *resp;
- struct gcip_mailbox_async_response *async_resp;
+ struct gcip_mailbox_resp_awaiter *awaiter;
};
/*
@@ -74,7 +74,7 @@ static void gcip_mailbox_del_wait_resp(struct gcip_mailbox *mailbox, void *resp)
}
/*
- * Adds @resp to @mailbox->wait_list. If @async_resp is not NULL, the @resp is asynchronous.
+ * Adds @resp to @mailbox->wait_list. If @awaiter is not NULL, the @resp is asynchronous.
* Otherwise, the @resp is synchronous.
*
* wait_list is a FIFO queue, with sequence number in increasing order.
@@ -82,7 +82,7 @@ static void gcip_mailbox_del_wait_resp(struct gcip_mailbox *mailbox, void *resp)
* Returns 0 on success, or -ENOMEM if failed on allocation.
*/
static int gcip_mailbox_push_wait_resp(struct gcip_mailbox *mailbox, void *resp,
- struct gcip_mailbox_async_response *async_resp)
+ struct gcip_mailbox_resp_awaiter *awaiter)
{
struct gcip_mailbox_wait_list_elem *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
unsigned long flags;
@@ -91,7 +91,7 @@ static int gcip_mailbox_push_wait_resp(struct gcip_mailbox *mailbox, void *resp,
return -ENOMEM;
entry->resp = resp;
- entry->async_resp = async_resp;
+ entry->awaiter = awaiter;
ACQUIRE_WAIT_LIST_LOCK(true, &flags);
list_add_tail(&entry->list, &mailbox->wait_list);
RELEASE_WAIT_LIST_LOCK(true, flags);
@@ -103,10 +103,10 @@ static int gcip_mailbox_push_wait_resp(struct gcip_mailbox *mailbox, void *resp,
* Pushes @cmd to the command queue of mailbox and returns. @resp should be passed if the request
* is synchronous and want to get the response. If @resp is NULL even though the request is
* synchronous, the @cmd will be put into the queue, but the caller may not wait the response and
- * ignore it. If the request is async, @async_resp should be passed too.
+ * ignore it. If the request is async, @awaiter should be passed too.
*/
static int gcip_mailbox_enqueue_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp,
- struct gcip_mailbox_async_response *async_resp)
+ struct gcip_mailbox_resp_awaiter *awaiter)
{
int ret = 0;
u32 tail;
@@ -139,7 +139,7 @@ static int gcip_mailbox_enqueue_cmd(struct gcip_mailbox *mailbox, void *cmd, voi
/* Adds @resp to the wait_list only if the cmd can be pushed successfully. */
SET_RESP_ELEM_SEQ(resp, GET_CMD_ELEM_SEQ(cmd));
SET_RESP_ELEM_STATUS(resp, GCIP_MAILBOX_STATUS_WAITING_RESPONSE);
- ret = gcip_mailbox_push_wait_resp(mailbox, resp, async_resp);
+ ret = gcip_mailbox_push_wait_resp(mailbox, resp, awaiter);
if (ret)
goto out;
}
@@ -195,7 +195,7 @@ out:
static void gcip_mailbox_handle_response(struct gcip_mailbox *mailbox, void *resp)
{
struct gcip_mailbox_wait_list_elem *cur, *nxt;
- struct gcip_mailbox_async_response *async_resp;
+ struct gcip_mailbox_resp_awaiter *awaiter;
unsigned long flags;
u64 cur_seq, seq = GET_RESP_ELEM_SEQ(resp);
@@ -219,19 +219,18 @@ static void gcip_mailbox_handle_response(struct gcip_mailbox *mailbox, void *res
if (cur_seq == seq) {
memcpy(cur->resp, resp, mailbox->resp_elem_size);
list_del(&cur->list);
- if (cur->async_resp) {
- async_resp = cur->async_resp;
- cancel_delayed_work(&async_resp->timeout_work);
+ if (cur->awaiter) {
+ awaiter = cur->awaiter;
+ cancel_delayed_work(&awaiter->timeout_work);
/*
- * If `handle_async_resp_arrived` callback is defined, @async_resp
+ * If `handle_awaiter_arrived` callback is defined, @awaiter
* will be released from the implementation side. Otherwise, it
* should be freed from here.
*/
- if (mailbox->ops->handle_async_resp_arrived)
- mailbox->ops->handle_async_resp_arrived(mailbox,
- async_resp);
+ if (mailbox->ops->handle_awaiter_arrived)
+ mailbox->ops->handle_awaiter_arrived(mailbox, awaiter);
else
- gcip_mailbox_release_async_resp(cur->async_resp);
+ gcip_mailbox_release_awaiter(cur->awaiter);
}
kfree(cur);
break;
@@ -360,9 +359,9 @@ static int gcip_mailbox_fetch_one_response(struct gcip_mailbox *mailbox, void *r
/* Handles the timed out asynchronous commands. */
static void gcip_mailbox_async_cmd_timeout_work(struct work_struct *work)
{
- struct gcip_mailbox_async_response *async_resp =
- container_of(work, struct gcip_mailbox_async_response, timeout_work.work);
- struct gcip_mailbox *mailbox = async_resp->mailbox;
+ struct gcip_mailbox_resp_awaiter *awaiter =
+ container_of(work, struct gcip_mailbox_resp_awaiter, timeout_work.work);
+ struct gcip_mailbox *mailbox = awaiter->mailbox;
/*
* This function will acquire the mailbox wait_list_lock. This means if
@@ -372,23 +371,23 @@ static void gcip_mailbox_async_cmd_timeout_work(struct work_struct *work)
* Once this function has the wait_list_lock, no future response
* processing will begin until this response has been removed.
*/
- gcip_mailbox_del_wait_resp(mailbox, async_resp->resp);
+ gcip_mailbox_del_wait_resp(mailbox, awaiter->resp);
/*
- * Handle timed out async_resp. If `handle_async_resp_timedout` is defined, @async_resp
+ * Handle timed out awaiter. If `handle_awaiter_timedout` is defined, @awaiter
* will be released from the implementation side. Otherwise, it should be freed from here.
*/
- if (mailbox->ops->handle_async_resp_timedout)
- mailbox->ops->handle_async_resp_timedout(mailbox, async_resp);
+ if (mailbox->ops->handle_awaiter_timedout)
+ mailbox->ops->handle_awaiter_timedout(mailbox, awaiter);
else
- gcip_mailbox_release_async_resp(async_resp);
+ gcip_mailbox_release_awaiter(awaiter);
}
/* Cleans up all the asynchronous responses which are not responded yet. */
-static void gcip_mailbox_flush_async_resp(struct gcip_mailbox *mailbox)
+static void gcip_mailbox_flush_awaiter(struct gcip_mailbox *mailbox)
{
struct gcip_mailbox_wait_list_elem *cur, *nxt;
- struct gcip_mailbox_async_response *async_resp;
+ struct gcip_mailbox_resp_awaiter *awaiter;
struct list_head resps_to_flush;
/* If mailbox->ops is NULL, the mailbox is already released. */
@@ -405,16 +404,16 @@ static void gcip_mailbox_flush_async_resp(struct gcip_mailbox *mailbox)
ACQUIRE_WAIT_LIST_LOCK(false, NULL);
list_for_each_entry_safe (cur, nxt, &mailbox->wait_list, list) {
list_del(&cur->list);
- if (cur->async_resp) {
+ if (cur->awaiter) {
list_add_tail(&cur->list, &resps_to_flush);
/*
* Clear the response's destination queue so that if the
* timeout worker is running, it won't try to process
* this response after `wait_list_lock` is released.
*/
- async_resp = cur->async_resp;
- if (mailbox->ops->flush_async_resp)
- mailbox->ops->flush_async_resp(mailbox, async_resp);
+ awaiter = cur->awaiter;
+ if (mailbox->ops->flush_awaiter)
+ mailbox->ops->flush_awaiter(mailbox, awaiter);
} else {
dev_warn(mailbox->dev,
"Unexpected synchronous command pending on mailbox release\n");
@@ -429,9 +428,9 @@ static void gcip_mailbox_flush_async_resp(struct gcip_mailbox *mailbox)
*/
list_for_each_entry_safe (cur, nxt, &resps_to_flush, list) {
list_del(&cur->list);
- async_resp = cur->async_resp;
- gcip_mailbox_cancel_async_resp_timeout(async_resp);
- gcip_mailbox_release_async_resp(async_resp);
+ awaiter = cur->awaiter;
+ gcip_mailbox_cancel_awaiter_timeout(awaiter);
+ gcip_mailbox_release_awaiter(awaiter);
kfree(cur);
}
}
@@ -507,7 +506,7 @@ err_unset_data:
void gcip_mailbox_release(struct gcip_mailbox *mailbox)
{
- gcip_mailbox_flush_async_resp(mailbox);
+ gcip_mailbox_flush_awaiter(mailbox);
gcip_mailbox_set_ops(mailbox, NULL);
gcip_mailbox_set_data(mailbox, NULL);
}
@@ -563,46 +562,46 @@ int gcip_mailbox_send_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp)
return 0;
}
-struct gcip_mailbox_async_response *gcip_mailbox_put_cmd(struct gcip_mailbox *mailbox, void *cmd,
- void *resp, void *data)
+struct gcip_mailbox_resp_awaiter *gcip_mailbox_put_cmd(struct gcip_mailbox *mailbox, void *cmd,
+ void *resp, void *data)
{
- struct gcip_mailbox_async_response *async_resp;
+ struct gcip_mailbox_resp_awaiter *awaiter;
int ret;
- async_resp = kzalloc(sizeof(*async_resp), GFP_KERNEL);
- if (!async_resp)
+ awaiter = kzalloc(sizeof(*awaiter), GFP_KERNEL);
+ if (!awaiter)
return ERR_PTR(-ENOMEM);
- async_resp->resp = resp;
- async_resp->mailbox = mailbox;
- async_resp->data = data;
- async_resp->release_data = mailbox->ops->release_async_resp_data;
+ awaiter->resp = resp;
+ awaiter->mailbox = mailbox;
+ awaiter->data = data;
+ awaiter->release_data = mailbox->ops->release_awaiter_data;
- INIT_DELAYED_WORK(&async_resp->timeout_work, gcip_mailbox_async_cmd_timeout_work);
- schedule_delayed_work(&async_resp->timeout_work, msecs_to_jiffies(mailbox->timeout));
+ INIT_DELAYED_WORK(&awaiter->timeout_work, gcip_mailbox_async_cmd_timeout_work);
+ schedule_delayed_work(&awaiter->timeout_work, msecs_to_jiffies(mailbox->timeout));
- ret = gcip_mailbox_enqueue_cmd(mailbox, cmd, async_resp->resp, async_resp);
+ ret = gcip_mailbox_enqueue_cmd(mailbox, cmd, awaiter->resp, awaiter);
if (ret)
goto err_free_resp;
- return async_resp;
+ return awaiter;
err_free_resp:
- gcip_mailbox_cancel_async_resp_timeout(async_resp);
- kfree(async_resp);
+ gcip_mailbox_cancel_awaiter_timeout(awaiter);
+ kfree(awaiter);
return ERR_PTR(ret);
}
-void gcip_mailbox_cancel_async_resp_timeout(struct gcip_mailbox_async_response *async_resp)
+void gcip_mailbox_cancel_awaiter_timeout(struct gcip_mailbox_resp_awaiter *awaiter)
{
- cancel_delayed_work_sync(&async_resp->timeout_work);
+ cancel_delayed_work_sync(&awaiter->timeout_work);
}
-void gcip_mailbox_release_async_resp(struct gcip_mailbox_async_response *async_resp)
+void gcip_mailbox_release_awaiter(struct gcip_mailbox_resp_awaiter *awaiter)
{
- if (async_resp->release_data)
- async_resp->release_data(async_resp->data);
- kfree(async_resp);
+ if (awaiter->release_data)
+ awaiter->release_data(awaiter->data);
+ kfree(awaiter);
}
void gcip_mailbox_consume_one_response(struct gcip_mailbox *mailbox, void *resp)
diff --git a/drivers/edgetpu/gcip-kernel-driver/include/gcip/gcip-mailbox.h b/drivers/edgetpu/gcip-kernel-driver/include/gcip/gcip-mailbox.h
index 02af65e..9ea7876 100644
--- a/drivers/edgetpu/gcip-kernel-driver/include/gcip/gcip-mailbox.h
+++ b/drivers/edgetpu/gcip-kernel-driver/include/gcip/gcip-mailbox.h
@@ -88,7 +88,7 @@ static inline bool gcip_valid_circ_queue_size(u32 size, u32 wrap_bit)
struct gcip_mailbox;
/* Wrapper struct for responses consumed by a thread other than the one which sent the command. */
-struct gcip_mailbox_async_response {
+struct gcip_mailbox_resp_awaiter {
/* Response. */
void *resp;
/* The work which will be executed when the timeout occurs. */
@@ -102,7 +102,7 @@ struct gcip_mailbox_async_response {
void *data;
/*
* The callback for releasing the @data.
- * It will be set as @release_async_resp_data of struct gcip_mailbox_ops.
+ * It will be set as @release_awaiter_data of struct gcip_mailbox_ops.
*/
void (*release_data)(void *data);
};
@@ -282,39 +282,39 @@ struct gcip_mailbox_ops {
bool (*before_handle_resp)(struct gcip_mailbox *mailbox, const void *resp);
/*
* Handles the asynchronous response which arrives well. How to handle it depends on the
- * chip implementation. However, @async_resp should be released by calling the
- * `gcip_mailbox_release_async_resp` function when the kernel driver doesn't need
- * @async_resp anymore. This is called with the `wait_list_lock` being held.
+ * chip implementation. However, @awaiter should be released by calling the
+ * `gcip_mailbox_release_awaiter` function when the kernel driver doesn't need
+ * @awaiter anymore. This is called with the `wait_list_lock` being held.
* Context: normal and in_interrupt().
*/
- void (*handle_async_resp_arrived)(struct gcip_mailbox *mailbox,
- struct gcip_mailbox_async_response *async_resp);
+ void (*handle_awaiter_arrived)(struct gcip_mailbox *mailbox,
+ struct gcip_mailbox_resp_awaiter *awaiter);
/*
* Handles the timed out asynchronous response. How to handle it depends on the chip
- * implementation. However, @async_resp should be released by calling the
- * `gcip_mailbox_release_async_resp` function when the kernel driver doesn't need
- * @async_resp anymore. This is called without holding any locks.
+ * implementation. However, @awaiter should be released by calling the
+ * `gcip_mailbox_release_awaiter` function when the kernel driver doesn't need
+ * @awaiter anymore. This is called without holding any locks.
* Context: normal and in_interrupt().
*/
- void (*handle_async_resp_timedout)(struct gcip_mailbox *mailbox,
- struct gcip_mailbox_async_response *async_resp);
+ void (*handle_awaiter_timedout)(struct gcip_mailbox *mailbox,
+ struct gcip_mailbox_resp_awaiter *awaiter);
/*
* Cleans up asynchronous response which is not arrived yet, but also not timed out.
- * The @async_resp should be marked as unprocessable to make it not to be processed by
- * the `handle_async_resp_arrived` or `handle_async_resp_timedout` callbacks in race
- * conditions. Don't have to release @async_resp of this function by calling the
- * `gcip_mailbox_release_async_resp` function. It will be released internally. This is
+ * The @awaiter should be marked as unprocessable to make it not to be processed by
+ * the `handle_awaiter_arrived` or `handle_awaiter_timedout` callbacks in race
+ * conditions. Don't have to release @awaiter of this function by calling the
+ * `gcip_mailbox_release_awaiter` function. It will be released internally. This is
* called with the `wait_list_lock` being held.
* Context: normal.
*/
- void (*flush_async_resp)(struct gcip_mailbox *mailbox,
- struct gcip_mailbox_async_response *async_resp);
+ void (*flush_awaiter)(struct gcip_mailbox *mailbox,
+ struct gcip_mailbox_resp_awaiter *awaiter);
/*
* Releases the @data which was passed to the `gcip_mailbox_put_cmd` function. This is
* called without holding any locks.
* Context: normal and in_interrupt().
*/
- void (*release_async_resp_data)(void *data);
+ void (*release_awaiter_data)(void *data);
};
struct gcip_mailbox {
@@ -399,35 +399,35 @@ int gcip_mailbox_send_cmd(struct gcip_mailbox *mailbox, void *cmd, void *resp);
/*
* Executes @cmd command asynchronously. This function returns an instance of
- * `struct gcip_mailbox_async_response` which handles the arrival and time-out of the response.
+ * `struct gcip_mailbox_resp_awaiter` which handles the arrival and time-out of the response.
* The implementation side can cancel the asynchronous response by calling the
- * `gcip_mailbox_cancel_async_resp_timeout` function with it.
+ * `gcip_mailbox_cancel_awaiter_timeout` function with it.
*
- * Arrived asynchronous response will be handled by `handle_async_resp` callback and timed out
- * asynchronous response will be handled by `handle_async_resp_timedout` callback. Those callbacks
- * will pass the @async_resp as a parameter which is the same with the return of this function.
+ * Arrived asynchronous response will be handled by `handle_awaiter_arrived` callback and timed out
+ * asynchronous response will be handled by `handle_awaiter_timedout` callback. Those callbacks
+ * will pass the @awaiter as a parameter which is the same with the return of this function.
* The response can be accessed from `resp` member of it. Also, the @data passed to this function
- * can be accessed from `data` member variable of it. The @async_resp must be released by calling
- * the `gcip_mailbox_release_async_resp` function when it is not needed anymore.
+ * can be accessed from `data` member variable of it. The @awaiter must be released by calling
+ * the `gcip_mailbox_release_awaiter` function when it is not needed anymore.
*
* If the mailbox is released before the response arrives, all the waiting asynchronous responses
- * will be flushed. In this case, the `flush_async_resp` callback will be called for that response
- * and @async_resp don't have to be released by the implementation side.
- * (i.e, the `gcip_mailbox_release_async_resp` function will be called internally.)
+ * will be flushed. In this case, the `flush_awaiter` callback will be called for that response
+ * and @awaiter don't have to be released by the implementation side.
+ * (i.e, the `gcip_mailbox_release_awaiter` function will be called internally.)
*
- * The caller defines the way of cleaning up the @data to the `release_async_resp_data` callback.
- * This callback will be called when the `gcip_mailbox_release_async_resp` function is called or
+ * The caller defines the way of cleaning up the @data to the `release_awaiter_data` callback.
+ * This callback will be called when the `gcip_mailbox_release_awaiter` function is called or
* the response is flushed.
*
* If this function fails to request the command, it will return the error pointer. In this case,
- * the caller should free @data explicitly. (i.e, the callback `release_async_resp_data` will not
+ * the caller should free @data explicitly. (i.e, the callback `release_awaiter_data` will not
* be.)
*
* Note: the asynchronous responses fetched from @resp_queue should be released by calling the
- * `gcip_mailbox_release_async_resp` function.
+ * `gcip_mailbox_release_awaiter` function.
*/
-struct gcip_mailbox_async_response *gcip_mailbox_put_cmd(struct gcip_mailbox *mailbox, void *cmd,
- void *resp, void *data);
+struct gcip_mailbox_resp_awaiter *gcip_mailbox_put_cmd(struct gcip_mailbox *mailbox, void *cmd,
+ void *resp, void *data);
/*
* Cancels the timeout work of the asynchronous response. In normally, the response arrives and
@@ -439,13 +439,13 @@ struct gcip_mailbox_async_response *gcip_mailbox_put_cmd(struct gcip_mailbox *ma
*
* Note: this function will cancel the timeout work synchronously.
*/
-void gcip_mailbox_cancel_async_resp_timeout(struct gcip_mailbox_async_response *async_resp);
+void gcip_mailbox_cancel_awaiter_timeout(struct gcip_mailbox_resp_awaiter *awaiter);
/*
- * Releases @async_resp. Every fetched (arrived or timed out) asynchronous responses should be
- * released by calling this. It will call the `release_async_resp_data` callback internally.
+ * Releases @awaiter. Every fetched (arrived or timed out) asynchronous responses should be
+ * released by calling this. It will call the `release_awaiter_data` callback internally.
*/
-void gcip_mailbox_release_async_resp(struct gcip_mailbox_async_response *async_resp);
+void gcip_mailbox_release_awaiter(struct gcip_mailbox_resp_awaiter *awaiter);
/*
* Consume one response and handle it. This can be used for consuming one response quickly and then
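
A caller-side sketch of the awaiter flow documented above. Everything prefixed my_ (my_resp, my_handle_awaiter_arrived, my_send_async_cmd) is a hypothetical chip-layer name, not part of this change; only the gcip_mailbox_* calls and struct members come from this diff.

/* Hypothetical caller of the renamed awaiter API; a sketch, not driver code. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <gcip/gcip-mailbox.h>

struct my_resp {
	u64 seq;
	u16 status;
};

/* Wired up as ops->release_awaiter_data; reclaims the @data given to put_cmd. */
static void my_release_awaiter_data(void *data)
{
	kfree(data);
}

/* Wired up as ops->handle_awaiter_arrived; consumes then releases the awaiter. */
static void my_handle_awaiter_arrived(struct gcip_mailbox *mailbox,
				      struct gcip_mailbox_resp_awaiter *awaiter)
{
	struct my_resp *resp = awaiter->resp;

	dev_dbg(mailbox->dev, "resp seq=%llu status=%u\n", resp->seq, resp->status);
	/* Calls my_release_awaiter_data(awaiter->data) internally. */
	gcip_mailbox_release_awaiter(awaiter);
}

static int my_send_async_cmd(struct gcip_mailbox *mailbox, void *cmd)
{
	struct gcip_mailbox_resp_awaiter *awaiter;
	struct my_resp *resp = kzalloc(sizeof(*resp), GFP_KERNEL);

	if (!resp)
		return -ENOMEM;

	/* @resp doubles as @data so release_awaiter_data frees it exactly once. */
	awaiter = gcip_mailbox_put_cmd(mailbox, cmd, resp, resp);
	if (IS_ERR(awaiter)) {
		/* Per the header: release_awaiter_data is not called on failure. */
		kfree(resp);
		return PTR_ERR(awaiter);
	}
	/* Arrival or timeout is now delivered through the handle_awaiter_* ops. */
	return 0;
}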
diff --git a/drivers/edgetpu/mobile-pm.c b/drivers/edgetpu/mobile-pm.c
index 354e373..8ad78e8 100644
--- a/drivers/edgetpu/mobile-pm.c
+++ b/drivers/edgetpu/mobile-pm.c
@@ -24,10 +24,6 @@
#include "edgetpu-pm.c"
#include "edgetpu-soc.h"
-static int power_state = TPU_DEFAULT_POWER_STATE;
-
-module_param(power_state, int, 0660);
-
enum edgetpu_pwr_state edgetpu_active_states[EDGETPU_NUM_STATES] = {
TPU_ACTIVE_UUD,
TPU_ACTIVE_SUD,
@@ -37,46 +33,16 @@ enum edgetpu_pwr_state edgetpu_active_states[EDGETPU_NUM_STATES] = {
uint32_t *edgetpu_states_display = edgetpu_active_states;
-static int mobile_pwr_state_init(struct device *dev)
-{
- int ret;
- int curr_state;
-
- pm_runtime_enable(dev);
- curr_state = edgetpu_soc_pm_get_rate(0);
-
- if (curr_state > TPU_OFF) {
- ret = pm_runtime_get_sync(dev);
- if (ret) {
- pm_runtime_put_noidle(dev);
- dev_err(dev, "pm_runtime_get_sync returned %d\n", ret);
- return ret;
- }
- }
-
- ret = edgetpu_soc_pm_set_init_freq(curr_state);
- if (ret) {
- dev_err(dev, "error initializing tpu state: %d\n", ret);
- if (curr_state > TPU_OFF)
- pm_runtime_put_sync(dev);
- return ret;
- }
-
- return ret;
-}
-
static int mobile_pwr_state_set_locked(struct edgetpu_mobile_platform_dev *etmdev, u64 val)
{
int ret;
- int curr_state;
+ struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
struct edgetpu_dev *etdev = &etmdev->edgetpu_dev;
struct device *dev = etdev->dev;
- curr_state = edgetpu_soc_pm_get_rate(0);
-
- dev_dbg(dev, "Power state %d -> %llu\n", curr_state, val);
+ dev_dbg(dev, "Power state to %llu\n", val);
- if (curr_state == TPU_OFF && val > TPU_OFF) {
+ if (val > TPU_OFF && (!platform_pwr->is_block_down || platform_pwr->is_block_down(etdev))) {
ret = pm_runtime_get_sync(dev);
if (ret) {
pm_runtime_put_noidle(dev);
@@ -92,7 +58,8 @@ static int mobile_pwr_state_set_locked(struct edgetpu_mobile_platform_dev *etmde
return ret;
}
- if (curr_state != TPU_OFF && val == TPU_OFF) {
+ if (val == TPU_OFF &&
+ (!platform_pwr->is_block_down || !platform_pwr->is_block_down(etdev))) {
ret = pm_runtime_put_sync(dev);
if (ret) {
dev_err(dev, "%s: pm_runtime_put_sync returned %d\n", __func__, ret);
@@ -108,7 +75,7 @@ static int mobile_pwr_state_get_locked(void *data, u64 *val)
struct edgetpu_dev *etdev = (typeof(etdev))data;
struct device *dev = etdev->dev;
- *val = edgetpu_soc_pm_get_rate(0);
+ *val = edgetpu_soc_pm_get_rate(etdev, 0);
dev_dbg(dev, "current tpu state: %llu\n", *val);
return 0;
@@ -212,27 +179,6 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_pwr_state, mobile_pwr_state_get, mobile_pwr_st
DEFINE_DEBUGFS_ATTRIBUTE(fops_tpu_min_pwr_state, mobile_min_pwr_state_get, mobile_min_pwr_state_set,
"%llu\n");
-static int mobile_get_initial_pwr_state(struct device *dev)
-{
- switch (power_state) {
- case TPU_ACTIVE_UUD:
- case TPU_ACTIVE_SUD:
- case TPU_ACTIVE_UD:
- case TPU_ACTIVE_NOM:
- dev_info(dev, "Initial power state: %d\n", power_state);
- break;
- case TPU_OFF:
- dev_warn(dev, "Power state %d prevents control core booting", power_state);
- fallthrough;
- default:
- dev_warn(dev, "Power state %d is invalid\n", power_state);
- dev_warn(dev, "defaulting to active nominal\n");
- power_state = TPU_ACTIVE_NOM;
- break;
- }
- return power_state;
-}
-
static int mobile_power_down(struct edgetpu_pm *etpm);
static int mobile_power_up(struct edgetpu_pm *etpm)
@@ -245,12 +191,14 @@ static int mobile_power_up(struct edgetpu_pm *etpm)
if (platform_pwr->is_block_down && !platform_pwr->is_block_down(etdev))
return -EAGAIN;
- ret = mobile_pwr_state_set(etpm->etdev, mobile_get_initial_pwr_state(etdev->dev));
-
etdev_info(etpm->etdev, "Powering up\n");
- if (ret)
+ ret = pm_runtime_get_sync(etdev->dev);
+ if (ret) {
+ pm_runtime_put_noidle(etdev->dev);
+ etdev_err(etdev, "pm_runtime_get_sync returned %d\n", ret);
return ret;
+ }
if (platform_pwr->lpm_up)
platform_pwr->lpm_up(etdev);
@@ -320,7 +268,6 @@ static int mobile_power_down(struct edgetpu_pm *etpm)
struct edgetpu_dev *etdev = etpm->etdev;
struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
- u64 val;
int res = 0;
int min_state = platform_pwr->min_state;
@@ -331,11 +278,7 @@ static int mobile_power_down(struct edgetpu_pm *etpm)
return 0;
}
- if (mobile_pwr_state_get(etdev, &val)) {
- etdev_warn(etdev, "Failed to read current power state\n");
- val = TPU_ACTIVE_NOM;
- }
- if (val == TPU_OFF) {
+ if (platform_pwr->is_block_down && platform_pwr->is_block_down(etdev)) {
etdev_dbg(etdev, "Device already off, skipping shutdown\n");
return 0;
}
@@ -366,7 +309,11 @@ static int mobile_power_down(struct edgetpu_pm *etpm)
etdev_warn(etdev, "CPU reset request failed (%d)\n", res);
}
- mobile_pwr_state_set(etdev, TPU_OFF);
+ res = pm_runtime_put_sync(etdev->dev);
+ if (res) {
+ etdev_err(etdev, "pm_runtime_put_sync returned %d\n", res);
+ return res;
+ }
edgetpu_soc_pm_power_down(etdev);
@@ -389,16 +336,18 @@ static int mobile_pm_after_create(struct edgetpu_pm *etpm)
struct device *dev = etdev->dev;
struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
- ret = mobile_pwr_state_init(dev);
- if (ret)
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret) {
+ pm_runtime_put_noidle(dev);
+ dev_err(dev, "pm_runtime_get_sync returned %d\n", ret);
return ret;
+ }
mutex_init(&platform_pwr->policy_lock);
mutex_init(&platform_pwr->state_lock);
- ret = mobile_pwr_state_set(etdev, mobile_get_initial_pwr_state(dev));
- if (ret)
- return ret;
platform_pwr->debugfs_dir = debugfs_create_dir("power", edgetpu_fs_debugfs_dir());
if (IS_ERR_OR_NULL(platform_pwr->debugfs_dir)) {
dev_warn(etdev->dev, "Failed to create debug FS power");
diff --git a/drivers/edgetpu/mobile-soc-gsx01.c b/drivers/edgetpu/mobile-soc-gsx01.c
index 658dd44..f194b77 100644
--- a/drivers/edgetpu/mobile-soc-gsx01.c
+++ b/drivers/edgetpu/mobile-soc-gsx01.c
@@ -6,6 +6,7 @@
*/
#include <linux/device.h>
+#include <linux/errno.h>
#include <linux/gsa/gsa_tpu.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>
@@ -55,6 +56,11 @@
#define SSMT_BYPASS (1 << 31)
+#define PLL_CON3_OFFSET 0x10c
+#define PLL_DIV_M_POS 16
+#define PLL_DIV_M_WIDTH 10
+#define TO_PLL_DIV_M(val) (((val) >> PLL_DIV_M_POS) & (BIT(PLL_DIV_M_WIDTH) - 1))
+
static int gsx01_parse_ssmt(struct edgetpu_mobile_platform_dev *etmdev)
{
struct edgetpu_dev *etdev = &etmdev->edgetpu_dev;
@@ -94,6 +100,31 @@ static int gsx01_parse_ssmt(struct edgetpu_mobile_platform_dev *etmdev)
return 0;
}
+static int gsx01_parse_cmu(struct edgetpu_mobile_platform_dev *etmdev)
+{
+ struct edgetpu_dev *etdev = &etmdev->edgetpu_dev;
+ struct platform_device *pdev = to_platform_device(etdev->dev);
+ struct edgetpu_soc_data *soc_data = etdev->soc_data;
+ struct resource *res;
+ void __iomem *cmu_base;
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cmu");
+ if (!res) {
+ etdev_warn(etdev, "Failed to find CMU register base");
+ return -EINVAL;
+ }
+ cmu_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(cmu_base)) {
+ ret = PTR_ERR(cmu_base);
+ etdev_warn(etdev, "Failed to map CMU register base: %d", ret);
+ return ret;
+ }
+ soc_data->cmu_base = cmu_base;
+
+ return 0;
+}
+
int edgetpu_soc_init(struct edgetpu_dev *etdev)
{
struct platform_device *pdev = to_platform_device(etdev->dev);
@@ -108,6 +139,11 @@ int edgetpu_soc_init(struct edgetpu_dev *etdev)
ret = gsx01_parse_ssmt(etmdev);
if (ret)
dev_warn(etdev->dev, "SSMT setup failed (%d). Context isolation not enforced", ret);
+
+ ret = gsx01_parse_cmu(etmdev);
+ if (ret)
+ dev_warn(etdev->dev, "CMU setup failed (%d). Can't query TPU core frequency.", ret);
+
return 0;
}
@@ -261,21 +297,62 @@ void edgetpu_soc_handle_reverse_kci(struct edgetpu_dev *etdev,
static unsigned long edgetpu_pm_rate;
-long edgetpu_soc_pm_get_rate(int flags)
+long edgetpu_soc_pm_get_rate(struct edgetpu_dev *etdev, int flags)
{
- return edgetpu_pm_rate;
-}
+ struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
+ struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
+ void __iomem *cmu_base = etdev->soc_data->cmu_base;
+ long curr_state;
+ u32 pll_con3;
-int edgetpu_soc_pm_set_rate(unsigned long rate)
-{
- edgetpu_pm_rate = rate;
+ if (IS_ENABLED(CONFIG_EDGETPU_TEST))
+ return edgetpu_pm_rate;
- return 0;
+ if (!cmu_base)
+ return -EINVAL;
+
+ if (!platform_pwr->is_block_down)
+ etdev_warn(etdev,
+ "Querying the CMU PLL register when TPU_OFF might lead to crash.");
+ else if (platform_pwr->is_block_down(etdev))
+ return 0;
+
+ pll_con3 = readl(cmu_base + PLL_CON3_OFFSET);
+
+ /*
+ * Below values must match the CMU PLL (pll_con3_pll_tpu) values in the spec and firmware.
+ * See https://drive.google.com/file/d/16S9yxmGwkOltdO2w4dC8tpAt99chn-aq/view and
+ * power_manager.cc for more details.
+ */
+ switch (TO_PLL_DIV_M(pll_con3)) {
+ case 221:
+ curr_state = TPU_ACTIVE_UUD;
+ break;
+ case 153:
+ curr_state = TPU_ACTIVE_SUD;
+ break;
+ case 206:
+ curr_state = TPU_ACTIVE_UD;
+ break;
+ case 182:
+ curr_state = TPU_ACTIVE_NOM;
+ break;
+ default:
+ etdev_err(etdev, "Invalid DIV_M read from PLL: %lu\n", TO_PLL_DIV_M(pll_con3));
+ curr_state = -EINVAL;
+ }
+
+ etdev_dbg(etdev, "current tpu state: %ld\n", curr_state);
+
+ return curr_state;
}
-int edgetpu_soc_pm_set_init_freq(unsigned long freq)
+int edgetpu_soc_pm_set_rate(unsigned long rate)
{
- return 0;
+ if (IS_ENABLED(CONFIG_EDGETPU_TEST))
+ edgetpu_pm_rate = rate;
+
+ return -EOPNOTSUPP;
}
int edgetpu_soc_pm_set_policy(u64 val)
@@ -285,7 +362,10 @@ int edgetpu_soc_pm_set_policy(u64 val)
static int edgetpu_core_rate_get(void *data, u64 *val)
{
- *val = edgetpu_soc_pm_get_rate(TPU_DEBUG_REQ | TPU_CLK_CORE_DEBUG);
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
+
+ *val = edgetpu_soc_pm_get_rate(etdev, TPU_DEBUG_REQ | TPU_CLK_CORE_DEBUG);
+
return 0;
}
@@ -301,7 +381,10 @@ static int edgetpu_core_rate_set(void *data, u64 val)
static int edgetpu_ctl_rate_get(void *data, u64 *val)
{
- *val = edgetpu_soc_pm_get_rate(TPU_DEBUG_REQ | TPU_CLK_CTL_DEBUG);
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
+
+ *val = edgetpu_soc_pm_get_rate(etdev, TPU_DEBUG_REQ | TPU_CLK_CTL_DEBUG);
+
return 0;
}
@@ -317,7 +400,10 @@ static int edgetpu_ctl_rate_set(void *data, u64 val)
static int edgetpu_axi_rate_get(void *data, u64 *val)
{
- *val = edgetpu_soc_pm_get_rate(TPU_DEBUG_REQ | TPU_CLK_AXI_DEBUG);
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
+
+ *val = edgetpu_soc_pm_get_rate(etdev, TPU_DEBUG_REQ | TPU_CLK_AXI_DEBUG);
+
return 0;
}
@@ -333,23 +419,29 @@ static int edgetpu_axi_rate_set(void *data, u64 val)
static int edgetpu_apb_rate_get(void *data, u64 *val)
{
- *val = edgetpu_soc_pm_get_rate(TPU_DEBUG_REQ | TPU_CLK_APB_DEBUG);
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
+
+ *val = edgetpu_soc_pm_get_rate(etdev, TPU_DEBUG_REQ | TPU_CLK_APB_DEBUG);
+
return 0;
}
static int edgetpu_uart_rate_get(void *data, u64 *val)
{
- *val = edgetpu_soc_pm_get_rate(TPU_DEBUG_REQ | TPU_CLK_UART_DEBUG);
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
+
+ *val = edgetpu_soc_pm_get_rate(etdev, TPU_DEBUG_REQ | TPU_CLK_UART_DEBUG);
+
return 0;
}
static int edgetpu_vdd_int_m_set(void *data, u64 val)
{
- struct device *dev = (struct device *)data;
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
unsigned long dbg_rate_req;
if (val > MAX_VOLTAGE_VAL) {
- dev_err(dev, "Preventing INT_M voltage > %duV", MAX_VOLTAGE_VAL);
+ etdev_err(etdev, "Preventing INT_M voltage > %duV", MAX_VOLTAGE_VAL);
return -EINVAL;
}
@@ -361,18 +453,21 @@ static int edgetpu_vdd_int_m_set(void *data, u64 val)
static int edgetpu_vdd_int_m_get(void *data, u64 *val)
{
- *val = edgetpu_soc_pm_get_rate(TPU_DEBUG_REQ | TPU_VDD_INT_M_DEBUG);
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
+
+ *val = edgetpu_soc_pm_get_rate(etdev, TPU_DEBUG_REQ | TPU_VDD_INT_M_DEBUG);
+
return 0;
}
static int edgetpu_vdd_tpu_set(void *data, u64 val)
{
int ret;
- struct device *dev = (struct device *)data;
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
unsigned long dbg_rate_req;
if (val > MAX_VOLTAGE_VAL) {
- dev_err(dev, "Preventing VDD_TPU voltage > %duV", MAX_VOLTAGE_VAL);
+ etdev_err(etdev, "Preventing VDD_TPU voltage > %duV", MAX_VOLTAGE_VAL);
return -EINVAL;
}
@@ -385,18 +480,21 @@ static int edgetpu_vdd_tpu_set(void *data, u64 val)
static int edgetpu_vdd_tpu_get(void *data, u64 *val)
{
- *val = edgetpu_soc_pm_get_rate(TPU_DEBUG_REQ | TPU_VDD_TPU_DEBUG);
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
+
+ *val = edgetpu_soc_pm_get_rate(etdev, TPU_DEBUG_REQ | TPU_VDD_TPU_DEBUG);
+
return 0;
}
static int edgetpu_vdd_tpu_m_set(void *data, u64 val)
{
int ret;
- struct device *dev = (struct device *)data;
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
unsigned long dbg_rate_req;
if (val > MAX_VOLTAGE_VAL) {
- dev_err(dev, "Preventing VDD_TPU voltage > %duV", MAX_VOLTAGE_VAL);
+ etdev_err(etdev, "Preventing VDD_TPU voltage > %duV", MAX_VOLTAGE_VAL);
return -EINVAL;
}
@@ -409,7 +507,10 @@ static int edgetpu_vdd_tpu_m_set(void *data, u64 val)
static int edgetpu_vdd_tpu_m_get(void *data, u64 *val)
{
- *val = edgetpu_soc_pm_get_rate(TPU_DEBUG_REQ | TPU_VDD_TPU_M_DEBUG);
+ struct edgetpu_dev *etdev = (typeof(etdev))data;
+
+ *val = edgetpu_soc_pm_get_rate(etdev, TPU_DEBUG_REQ | TPU_VDD_TPU_M_DEBUG);
+
return 0;
}
@@ -436,7 +537,6 @@ void edgetpu_soc_pm_power_down(struct edgetpu_dev *etdev)
int edgetpu_soc_pm_init(struct edgetpu_dev *etdev)
{
- struct device *dev = etdev->dev;
struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
struct edgetpu_mobile_platform_pwr *platform_pwr = &etmdev->platform_pwr;
@@ -448,14 +548,18 @@ int edgetpu_soc_pm_init(struct edgetpu_dev *etdev)
dev_warn(etdev->dev, "tpu_performance BTS scenario not found\n");
etdev->soc_data->scenario_count = 0;
- debugfs_create_file("vdd_tpu", 0660, platform_pwr->debugfs_dir, dev, &fops_tpu_vdd_tpu);
- debugfs_create_file("vdd_tpu_m", 0660, platform_pwr->debugfs_dir, dev, &fops_tpu_vdd_tpu_m);
- debugfs_create_file("vdd_int_m", 0660, platform_pwr->debugfs_dir, dev, &fops_tpu_vdd_int_m);
- debugfs_create_file("core_rate", 0660, platform_pwr->debugfs_dir, dev, &fops_tpu_core_rate);
- debugfs_create_file("ctl_rate", 0660, platform_pwr->debugfs_dir, dev, &fops_tpu_ctl_rate);
- debugfs_create_file("axi_rate", 0660, platform_pwr->debugfs_dir, dev, &fops_tpu_axi_rate);
- debugfs_create_file("apb_rate", 0440, platform_pwr->debugfs_dir, dev, &fops_tpu_apb_rate);
- debugfs_create_file("uart_rate", 0440, platform_pwr->debugfs_dir, dev, &fops_tpu_uart_rate);
+ debugfs_create_file("vdd_tpu", 0660, platform_pwr->debugfs_dir, etdev, &fops_tpu_vdd_tpu);
+ debugfs_create_file("vdd_tpu_m", 0660, platform_pwr->debugfs_dir, etdev,
+ &fops_tpu_vdd_tpu_m);
+ debugfs_create_file("vdd_int_m", 0660, platform_pwr->debugfs_dir, etdev,
+ &fops_tpu_vdd_int_m);
+ debugfs_create_file("core_rate", 0660, platform_pwr->debugfs_dir, etdev,
+ &fops_tpu_core_rate);
+ debugfs_create_file("ctl_rate", 0660, platform_pwr->debugfs_dir, etdev, &fops_tpu_ctl_rate);
+ debugfs_create_file("axi_rate", 0660, platform_pwr->debugfs_dir, etdev, &fops_tpu_axi_rate);
+ debugfs_create_file("apb_rate", 0440, platform_pwr->debugfs_dir, etdev, &fops_tpu_apb_rate);
+ debugfs_create_file("uart_rate", 0440, platform_pwr->debugfs_dir, etdev,
+ &fops_tpu_uart_rate);
return 0;
}
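
To sanity-check the new TO_PLL_DIV_M() decode, a standalone userspace sketch; the pll_con3 value is fabricated and BIT() is expanded by hand, while the position and width constants come from the diff above.

#include <stdint.h>
#include <stdio.h>

#define PLL_DIV_M_POS 16
#define PLL_DIV_M_WIDTH 10
/* Userspace stand-in for the kernel macro; BIT(n) is just (1u << n) here. */
#define TO_PLL_DIV_M(val) (((val) >> PLL_DIV_M_POS) & ((1u << PLL_DIV_M_WIDTH) - 1))

int main(void)
{
	/* Fabricated PLL_CON3 readout: DIV_M = 221 (0xdd) sits in bits [25:16]. */
	uint32_t pll_con3 = 0x00dd0000;

	/* Prints "DIV_M = 221", which edgetpu_soc_pm_get_rate() maps to TPU_ACTIVE_UUD. */
	printf("DIV_M = %u\n", TO_PLL_DIV_M(pll_con3));
	return 0;
}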
diff --git a/drivers/edgetpu/mobile-soc-gsx01.h b/drivers/edgetpu/mobile-soc-gsx01.h
index d07d1d4..9d32884 100644
--- a/drivers/edgetpu/mobile-soc-gsx01.h
+++ b/drivers/edgetpu/mobile-soc-gsx01.h
@@ -20,6 +20,8 @@ struct edgetpu_soc_data {
void __iomem **ssmt_base;
/* Number of SSMTs */
uint num_ssmts;
+ /* Virtual address of the CMU block for this chip. */
+ void __iomem *cmu_base;
/* INT/MIF requests for memory bandwidth */
struct exynos_pm_qos_request int_min;
struct exynos_pm_qos_request mif_min;
diff --git a/drivers/edgetpu/mobile-thermal.c b/drivers/edgetpu/mobile-thermal.c
index 457cf9f..c8fb0a2 100644
--- a/drivers/edgetpu/mobile-thermal.c
+++ b/drivers/edgetpu/mobile-thermal.c
@@ -179,7 +179,7 @@ static int edgetpu_get_requested_power(struct thermal_cooling_device *cdev,
unsigned long state_original;
struct edgetpu_thermal *cooling = cdev->devdata;
- state_original = edgetpu_soc_pm_get_rate(0);
+ state_original = edgetpu_soc_pm_get_rate(cooling->etdev, 0);
return edgetpu_state2power_internal(state_original, power, cooling);
}
diff --git a/drivers/edgetpu/rio-pm.c b/drivers/edgetpu/rio-pm.c
index a471cb7..0b6aa45 100644
--- a/drivers/edgetpu/rio-pm.c
+++ b/drivers/edgetpu/rio-pm.c
@@ -16,8 +16,6 @@
#include "mobile-soc-gsx01.h"
#include "mobile-pm.h"
-#define TPU_DEFAULT_POWER_STATE TPU_ACTIVE_NOM
-
#include "mobile-pm.c"
#define SHUTDOWN_DELAY_US_MIN 20
@@ -149,10 +147,6 @@ static bool rio_is_block_down(struct edgetpu_dev *etdev)
int timeout_cnt = 0;
int curr_state;
- if (!etmdev->pmu_status)
- /* Rely on pm_runtime for now */
- return true;
-
do {
/* Delay 20us per retry till blk shutdown finished */
usleep_range(SHUTDOWN_DELAY_US_MIN, SHUTDOWN_DELAY_US_MAX);
@@ -180,7 +174,8 @@ int edgetpu_chip_pm_create(struct edgetpu_dev *etdev)
platform_pwr->lpm_up = rio_lpm_up;
platform_pwr->lpm_down = rio_lpm_down;
- platform_pwr->is_block_down = rio_is_block_down;
+ if (etmdev->pmu_status)
+ platform_pwr->is_block_down = rio_is_block_down;
platform_pwr->post_fw_start = rio_post_fw_start;
return edgetpu_mobile_pm_create(etdev);
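
Since edgetpu_chip_pm_create() now installs is_block_down only when a PMU status register was mapped, the !etmdev->pmu_status early-return removed above is no longer needed inside the callback. For context, a hedged sketch of what a PMU-backed poll can look like; the "block off" status value and the retry bound are assumptions, and the real rio_is_block_down() body is only partially shown in this hunk.

/* Illustrative only; the constants below are assumptions, not from this commit. */
static bool example_is_block_down(struct edgetpu_dev *etdev)
{
	struct edgetpu_mobile_platform_dev *etmdev = to_mobile_dev(etdev);
	int timeout_cnt = 0;
	u32 curr_state;

	do {
		/* Delay 20us per retry till blk shutdown finished, as above. */
		usleep_range(SHUTDOWN_DELAY_US_MIN, SHUTDOWN_DELAY_US_MAX);
		curr_state = readl(etmdev->pmu_status);
		if (curr_state == 0)	/* hypothetical "block off" status */
			return true;
	} while (++timeout_cnt < 50);	/* hypothetical retry bound */

	return false;
}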