summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAurora pro automerger <aurora-pro-automerger@google.com>2022-07-19 08:41:41 +0000
committerJohn Scheible <johnscheible@google.com>2022-07-25 18:55:04 +0000
commit51345064a78c4fbee30e5c331a41c0dfab852fa0 (patch)
treec49363432f8d27d687608f9475be184f10b28ac2
parent965bb96998a383ae25c88da8c095f375bfb5e240 (diff)
downloadgs201-51345064a78c4fbee30e5c331a41c0dfab852fa0.tar.gz
[Copybara Auto Merge] Merge branch 'gs201-release' into 'android13-gs-pixel-5.10-tm-qpr1'
Revert "gxp: avoid holding semaphore during mmap" Revert "gxp: Disable telemetry before free" gxp: fix deadlock on power states queue full Bug: 236087752 gxp: warn deprecate pwr states only once Bug: 237337595 gxp: don't modify clkmux state during core booting Bug: 238960149 gxp: increase the waiting time for cores booting Bug: 237378056 gxp: Disable telemetry before free Bug: 235771175 gxp: avoid holding semaphore during mmap Bug: 232183143 gxp: parallelize the firmware startup process Bug: 207036666 gxp: hold mmap lock around call to find_extend_vma Bug: 237404338 gxp: Fix bug when clearing FW buffers on auth fail Bug: 237789581 gxp: deprecate NON_AGGRESSOR / add LOW_FREQ_CLKMUX Bug: 237337595 (repeat) Bug: 237378056 (repeat) GitOrigin-RevId: 75508d46deac85970c4d556770c105eaa4f2a2e8 Change-Id: Id50f1b68834e687178d66e0d239d1be42e0e2152
-rw-r--r--gxp-client.c4
-rw-r--r--gxp-client.h2
-rw-r--r--gxp-firmware.c199
-rw-r--r--gxp-firmware.h24
-rw-r--r--gxp-mailbox.c16
-rw-r--r--gxp-mailbox.h9
-rw-r--r--gxp-mapping.c3
-rw-r--r--gxp-platform.c59
-rw-r--r--gxp-pm.c219
-rw-r--r--gxp-pm.h54
-rw-r--r--gxp-vd.c69
-rw-r--r--gxp.h59
12 files changed, 409 insertions, 308 deletions
diff --git a/gxp-client.c b/gxp-client.c
index 87e911a..6bdebfc 100644
--- a/gxp-client.c
+++ b/gxp-client.c
@@ -32,7 +32,7 @@ struct gxp_client *gxp_client_create(struct gxp_dev *gxp)
client->requested_memory_power_state = 0;
client->vd = NULL;
client->tpu_mbx_allocated = false;
- client->requested_aggressor = false;
+ client->requested_low_clkmux = false;
return client;
}
@@ -65,7 +65,7 @@ void gxp_client_destroy(struct gxp_client *client)
gxp_wakelock_release(client->gxp);
gxp_pm_update_requested_power_states(
gxp, client->requested_power_state,
- client->requested_aggressor, AUR_OFF, true,
+ client->requested_low_clkmux, AUR_OFF, false,
client->requested_memory_power_state,
AUR_MEM_UNDEFINED);
}
diff --git a/gxp-client.h b/gxp-client.h
index fee0b9b..c0730e9 100644
--- a/gxp-client.h
+++ b/gxp-client.h
@@ -33,7 +33,7 @@ struct gxp_client {
uint requested_power_state;
/* Value is one of the MEMORY_POWER_STATE_* values from gxp.h. */
uint requested_memory_power_state;
- bool requested_aggressor;
+ bool requested_low_clkmux;
struct gxp_virtual_device *vd;
bool tpu_mbx_allocated;
diff --git a/gxp-firmware.c b/gxp-firmware.c
index 29b00e7..d9e6cc6 100644
--- a/gxp-firmware.c
+++ b/gxp-firmware.c
@@ -239,6 +239,7 @@ gxp_firmware_authenticate(struct gxp_dev *gxp,
if (ret) {
dev_err(gxp->dev, "GSA authentication failed: %d\n",
ret);
+ memset_io(buffer->vaddr, 0, buffer->size);
goto error;
}
}
@@ -250,14 +251,10 @@ error:
* Zero out firmware buffers if we got a authentication failure on any
* core.
*/
- for (core -= 1; core >= 0; core--)
- /*
- * TODO(b/237789581) Only the image which failed auth is being
- * zeroed out here. This is benign since the other buffers will
- * be zeroed-out next call, but should still be fixed.
- * The fix is ready upstream and will be in the next release.
- */
+ for (core -= 1; core >= 0; core--) {
+ buffer = &gxp->fwbufs[core];
memset_io(buffer->vaddr, 0, buffer->size);
+ }
return ret;
}
@@ -333,14 +330,6 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
void __iomem *core_scratchpad_base;
int ctr;
- /* Raise wakeup doorbell */
- dev_notice(gxp->dev, "Raising doorbell %d interrupt\n",
- CORE_WAKEUP_DOORBELL(core));
-#ifndef CONFIG_GXP_GEM5
- gxp_doorbell_enable_for_core(gxp, CORE_WAKEUP_DOORBELL(core), core);
-#endif
- gxp_doorbell_set(gxp, CORE_WAKEUP_DOORBELL(core));
-
/* Wait for core to come up */
dev_notice(gxp->dev, "Waiting for core %u to power up...\n", core);
ctr = 1000;
@@ -370,7 +359,7 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
*/
ctr = 5000;
offset = SCRATCHPAD_MSG_OFFSET(MSG_CORE_ALIVE);
- usleep_range(500 * GXP_TIME_DELAY_FACTOR, 1000 * GXP_TIME_DELAY_FACTOR);
+ usleep_range(50 * GXP_TIME_DELAY_FACTOR, 60 * GXP_TIME_DELAY_FACTOR);
while (ctr--) {
if (readl(core_scratchpad_base + offset) == Q7_ALIVE_MAGIC)
break;
@@ -385,15 +374,14 @@ static int gxp_firmware_handshake(struct gxp_dev *gxp, uint core)
#ifndef CONFIG_GXP_GEM5
/*
- * Currently, the hello_world FW reads the INT_MASK0 register
- * (written by the driver) to validate TOP access. The value
- * read is echoed back by the FW to offset MSG_TOP_ACCESS_OK in
- * the scratchpad space, which must be compared to the value
- * written in the INT_MASK0 register by the driver for
- * confirmation.
+ * FW reads the INT_MASK0 register (written by the driver) to
+ * validate TOP access. The value read is echoed back by the FW to
+ * offset MSG_TOP_ACCESS_OK in the scratchpad space, which must be
+ * compared to the value written in the INT_MASK0 register by the
+ * driver for confirmation.
* On Gem5, FW will start early when lpm is up. This behavior will
- * affect the order of reading/writing INT_MASK0, so ignore this
- * handshaking in Gem5.
+ * affect the order of reading/writing INT_MASK0, so ignore these
+ * handshakes in Gem5.
*/
ctr = 1000;
offset = SCRATCHPAD_MSG_OFFSET(MSG_TOP_ACCESS_OK);
@@ -684,11 +672,9 @@ err_authenticate_firmware:
return ret;
}
-int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core, uint core)
+static int gxp_firmware_setup(struct gxp_dev *gxp, uint core)
{
int ret = 0;
- struct work_struct *work;
if (gxp->firmware_running & BIT(core)) {
dev_err(gxp->dev, "Firmware is already running on core %u\n",
@@ -705,35 +691,46 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
/* Mark this as a cold boot */
gxp_firmware_set_boot_mode(gxp, core, GXP_BOOT_MODE_REQUEST_COLD_BOOT);
-#ifdef CONFIG_GXP_GEM5
- /*
- * GEM5 starts firmware after LPM is programmed, so we need to call
- * gxp_doorbell_enable_for_core() here to set GXP_REG_COMMON_INT_MASK_0
- * first to enable the firmware hadnshaking.
- */
- gxp_doorbell_enable_for_core(gxp, CORE_WAKEUP_DOORBELL(core), core);
-#endif
ret = gxp_firmware_setup_hw_after_block_off(gxp, core,
/*verbose=*/true);
if (ret) {
dev_err(gxp->dev, "Failed to power up core %u\n", core);
- goto out_firmware_unload;
+ gxp_firmware_unload(gxp, core);
+ }
+
+ return ret;
+}
+
+static void gxp_firmware_wakeup_cores(struct gxp_dev *gxp, uint core_list)
+{
+ uint core;
+
+ /* Raise wakeup doorbell */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (!(core_list & BIT(core)))
+ continue;
+#ifndef CONFIG_GXP_GEM5
+ gxp_doorbell_enable_for_core(gxp, CORE_WAKEUP_DOORBELL(core),
+ core);
+#endif
+ gxp_doorbell_set(gxp, CORE_WAKEUP_DOORBELL(core));
}
+}
+
+static int gxp_firmware_finish_startup(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core, uint core)
+{
+ int ret = 0;
+ struct work_struct *work;
- /* Switch PLL_CON0_NOC_USER MUX to the normal state to guarantee LPM works */
- gxp_pm_force_cmu_noc_user_mux_normal(gxp);
ret = gxp_firmware_handshake(gxp, core);
if (ret) {
dev_err(gxp->dev, "Firmware handshake failed on core %u\n",
core);
gxp_pm_core_off(gxp, core);
- goto out_check_noc_user_mux;
+ goto out_firmware_unload;
}
- /*
- * Check if we need to set PLL_CON0_NOC_USER MUX to low state for
- * AUR_READY requested state.
- */
- gxp_pm_check_cmu_noc_user_mux(gxp);
/* Initialize mailbox */
gxp->mailbox_mgr->mailboxes[core] =
@@ -761,22 +758,14 @@ int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
return ret;
-out_check_noc_user_mux:
- gxp_pm_check_cmu_noc_user_mux(gxp);
out_firmware_unload:
gxp_firmware_unload(gxp, core);
return ret;
}
-int gxp_firmware_setup_hw_after_block_off(struct gxp_dev *gxp, uint core,
- bool verbose)
-{
- gxp_program_reset_vector(gxp, core, verbose);
- return gxp_pm_core_on(gxp, core, verbose);
-}
-
-void gxp_firmware_stop(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core, uint core)
+static void gxp_firmware_stop_core(struct gxp_dev *gxp,
+ struct gxp_virtual_device *vd,
+ uint virt_core, uint core)
{
if (!(gxp->firmware_running & BIT(core)))
dev_err(gxp->dev, "Firmware is not running on core %u\n", core);
@@ -797,6 +786,106 @@ void gxp_firmware_stop(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
gxp_firmware_unload(gxp, core);
}
+int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint core_list)
+{
+ int ret;
+ uint core, virt_core;
+ uint failed_cores = 0;
+
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (core_list & BIT(core)) {
+ ret = gxp_firmware_setup(gxp, core);
+ if (ret) {
+ failed_cores |= BIT(core);
+ dev_err(gxp->dev, "Failed to run firmware on core %u\n",
+ core);
+ }
+ }
+ }
+ if (failed_cores != 0) {
+ /*
+ * Shut down the cores which call `gxp_firmware_setup`
+ * successfully
+ */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (core_list & BIT(core)) {
+ if (!(failed_cores & BIT(core))) {
+ gxp_pm_core_off(gxp, core);
+ gxp_firmware_unload(gxp, core);
+ }
+ }
+ }
+ goto out;
+ }
+#ifdef CONFIG_GXP_GEM5
+ /*
+ * GEM5 starts firmware after LPM is programmed, so we need to call
+ * gxp_doorbell_enable_for_core here to set GXP_REG_COMMON_INT_MASK_0
+ * first to enable the firmware handshakes.
+ */
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (!(core_list & BIT(core)))
+ continue;
+ gxp_doorbell_enable_for_core(gxp, CORE_WAKEUP_DOORBELL(core),
+ core);
+ }
+#endif
+ /* Switch clock mux to the normal state to guarantee LPM works */
+ gxp_pm_force_clkmux_normal(gxp);
+ gxp_firmware_wakeup_cores(gxp, core_list);
+ virt_core = 0;
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (core_list & BIT(core)) {
+ ret = gxp_firmware_finish_startup(gxp, vd, virt_core,
+ core);
+ if (ret) {
+ failed_cores |= BIT(core);
+ dev_err(gxp->dev,
+ "Failed to run firmware on core %u\n",
+ core);
+ }
+ virt_core++;
+ }
+ }
+
+ if (failed_cores != 0) {
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (core_list & BIT(core)) {
+ if (!(failed_cores & BIT(core))) {
+ gxp_firmware_stop_core(gxp, vd,
+ virt_core, core);
+ }
+ }
+ }
+ }
+ /* Check if we need to set clock mux to low state as requested */
+ gxp_pm_resume_clkmux(gxp);
+out:
+ return ret;
+}
+
+int gxp_firmware_setup_hw_after_block_off(struct gxp_dev *gxp, uint core,
+ bool verbose)
+{
+ gxp_program_reset_vector(gxp, core, verbose);
+ return gxp_pm_core_on(gxp, core, verbose);
+}
+
+
+void gxp_firmware_stop(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint core_list)
+{
+ uint core, virt_core = 0;
+
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (core_list & BIT(core)) {
+ gxp_firmware_stop_core(gxp, vd, virt_core, core);
+ virt_core++;
+ }
+ }
+}
+
void gxp_firmware_set_boot_mode(struct gxp_dev *gxp, uint core, u32 mode)
{
void __iomem *boot_mode_addr;
diff --git a/gxp-firmware.h b/gxp-firmware.h
index 835f8c9..008af5a 100644
--- a/gxp-firmware.h
+++ b/gxp-firmware.h
@@ -64,17 +64,6 @@ void gxp_fw_destroy(struct gxp_dev *gxp);
int gxp_firmware_request_if_needed(struct gxp_dev *gxp);
/*
- * Loads the firmware for the specified core in system memory and powers up the
- * core to start FW execution.
- */
-int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core, uint core);
-/*
- * Shuts down the specified core.
- */
-void gxp_firmware_stop(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
- uint virt_core, uint core);
-/*
* Re-program the reset vector and power on the core's LPM if the block had
* been shut down.
*/
@@ -82,6 +71,19 @@ int gxp_firmware_setup_hw_after_block_off(struct gxp_dev *gxp, uint core,
bool verbose);
/*
+ * Loads the firmware for the cores in system memory and powers up the cores
+ * to start FW execution.
+ */
+int gxp_firmware_run(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint core_list);
+
+/*
+ * Shuts down the cores and releases the resources.
+ */
+void gxp_firmware_stop(struct gxp_dev *gxp, struct gxp_virtual_device *vd,
+ uint core_list);
+
+/*
* Sets the specified core's boot mode or suspend request value.
* This function should be called only after the firmware has been run.
*/
diff --git a/gxp-mailbox.c b/gxp-mailbox.c
index dcb0b2a..f925125 100644
--- a/gxp-mailbox.c
+++ b/gxp-mailbox.c
@@ -234,8 +234,8 @@ static void gxp_mailbox_handle_response(struct gxp_mailbox *mailbox,
gxp_pm_update_requested_power_states(
async_resp->mailbox->gxp,
async_resp->gxp_power_state,
- async_resp->requested_aggressor,
- AUR_OFF, true,
+ async_resp->requested_low_clkmux,
+ AUR_OFF, false,
async_resp->memory_power_state,
AUR_MEM_UNDEFINED);
@@ -828,7 +828,7 @@ static void async_cmd_timeout_work(struct work_struct *work)
gxp_pm_update_requested_power_states(
async_resp->mailbox->gxp, async_resp->gxp_power_state,
- async_resp->requested_aggressor, AUR_OFF, true,
+ async_resp->requested_low_clkmux, AUR_OFF, false,
async_resp->memory_power_state, AUR_MEM_UNDEFINED);
if (async_resp->eventfd) {
@@ -848,7 +848,7 @@ int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
spinlock_t *queue_lock,
wait_queue_head_t *queue_waitq,
uint gxp_power_state, uint memory_power_state,
- bool requested_aggressor,
+ bool requested_low_clkmux,
struct gxp_eventfd *eventfd)
{
struct gxp_async_response *async_resp;
@@ -864,7 +864,7 @@ int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
async_resp->dest_queue_waitq = queue_waitq;
async_resp->gxp_power_state = gxp_power_state;
async_resp->memory_power_state = memory_power_state;
- async_resp->requested_aggressor = requested_aggressor;
+ async_resp->requested_low_clkmux = requested_low_clkmux;
if (eventfd && gxp_eventfd_get(eventfd))
async_resp->eventfd = eventfd;
else
@@ -875,8 +875,8 @@ int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
msecs_to_jiffies(MAILBOX_TIMEOUT));
gxp_pm_update_requested_power_states(
- mailbox->gxp, AUR_OFF, true, gxp_power_state,
- requested_aggressor, AUR_MEM_UNDEFINED, memory_power_state);
+ mailbox->gxp, AUR_OFF, false, gxp_power_state,
+ requested_low_clkmux, AUR_MEM_UNDEFINED, memory_power_state);
ret = gxp_mailbox_enqueue_cmd(mailbox, cmd, &async_resp->resp,
/* resp_is_async = */ true);
if (ret)
@@ -886,7 +886,7 @@ int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
err_free_resp:
gxp_pm_update_requested_power_states(mailbox->gxp, gxp_power_state,
- requested_aggressor, AUR_OFF, true,
+ requested_low_clkmux, AUR_OFF, false,
memory_power_state,
AUR_MEM_UNDEFINED);
cancel_delayed_work_sync(&async_resp->timeout_work);
diff --git a/gxp-mailbox.h b/gxp-mailbox.h
index 986620b..4bea5d7 100644
--- a/gxp-mailbox.h
+++ b/gxp-mailbox.h
@@ -94,8 +94,11 @@ struct gxp_async_response {
uint gxp_power_state;
/* Specified memory power state vote during the command execution */
uint memory_power_state;
- /* Specified whether the power state vote is requested with aggressor flag */
- bool requested_aggressor;
+ /*
+ * Specified whether the power state vote is requested with low
+ * frequency CLKMUX flag.
+ */
+ bool requested_low_clkmux;
/* gxp_eventfd to signal when the response completes. May be NULL */
struct gxp_eventfd *eventfd;
};
@@ -206,7 +209,7 @@ int gxp_mailbox_execute_cmd_async(struct gxp_mailbox *mailbox,
spinlock_t *queue_lock,
wait_queue_head_t *queue_waitq,
uint gxp_power_state, uint memory_power_state,
- bool requested_aggressor,
+ bool requested_low_clkmux,
struct gxp_eventfd *eventfd);
int gxp_mailbox_register_interrupt_handler(struct gxp_mailbox *mailbox,
diff --git a/gxp-mapping.c b/gxp-mapping.c
index 6bdd707..9a69173 100644
--- a/gxp-mapping.c
+++ b/gxp-mapping.c
@@ -7,6 +7,7 @@
#include <linux/dma-mapping.h>
#include <linux/mm.h>
+#include <linux/mmap_lock.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -83,6 +84,7 @@ struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
* it with FOLL_WRITE.
* default to read/write if find_extend_vma returns NULL
*/
+ mmap_read_lock(current->mm);
vma = find_extend_vma(current->mm, user_address & PAGE_MASK);
if (vma) {
if (!(vma->vm_flags & VM_WRITE))
@@ -91,6 +93,7 @@ struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
dev_dbg(gxp->dev,
"unable to find address in VMA, assuming buffer writable");
}
+ mmap_read_unlock(current->mm);
/* Pin the user pages */
offset = user_address & (PAGE_SIZE - 1);
diff --git a/gxp-platform.c b/gxp-platform.c
index dca51a7..4988768 100644
--- a/gxp-platform.c
+++ b/gxp-platform.c
@@ -442,7 +442,7 @@ gxp_mailbox_command_compat(struct gxp_client *client,
&client->vd->mailbox_resp_queues[virt_core].queue,
&client->vd->mailbox_resp_queues[virt_core].lock,
&client->vd->mailbox_resp_queues[virt_core].waitq,
- gxp_power_state, memory_power_state, true,
+ gxp_power_state, memory_power_state, false,
client->mb_eventfds[virt_core]);
if (ret) {
dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
@@ -475,7 +475,7 @@ static int gxp_mailbox_command(struct gxp_client *client,
int virt_core, phys_core;
int ret = 0;
uint gxp_power_state, memory_power_state;
- bool requested_aggressor = false;
+ bool requested_low_clkmux = false;
if (copy_from_user(&ibuf, argp, sizeof(ibuf))) {
dev_err(gxp->dev,
@@ -498,6 +498,18 @@ static int gxp_mailbox_command(struct gxp_client *client,
return -EINVAL;
}
+ if (ibuf.gxp_power_state == GXP_POWER_STATE_READY) {
+ dev_warn_once(
+ gxp->dev,
+ "GXP_POWER_STATE_READY is deprecated, please set GXP_POWER_LOW_FREQ_CLKMUX with GXP_POWER_STATE_UUD state");
+ ibuf.gxp_power_state = GXP_POWER_STATE_UUD;
+ }
+
+ if(ibuf.power_flags & GXP_POWER_NON_AGGRESSOR)
+ dev_warn_once(
+ gxp->dev,
+ "GXP_POWER_NON_AGGRESSOR is deprecated, no operation here");
+
/* Caller must hold VIRTUAL_DEVICE wakelock */
down_read(&client->semaphore);
@@ -545,14 +557,14 @@ static int gxp_mailbox_command(struct gxp_client *client,
cmd.buffer_descriptor = buffer;
gxp_power_state = aur_state_array[ibuf.gxp_power_state];
memory_power_state = aur_memory_state_array[ibuf.memory_power_state];
- requested_aggressor = (ibuf.power_flags & GXP_POWER_NON_AGGRESSOR) == 0;
+ requested_low_clkmux = (ibuf.power_flags & GXP_POWER_LOW_FREQ_CLKMUX) != 0;
ret = gxp_mailbox_execute_cmd_async(
gxp->mailbox_mgr->mailboxes[phys_core], &cmd,
&client->vd->mailbox_resp_queues[virt_core].queue,
&client->vd->mailbox_resp_queues[virt_core].lock,
&client->vd->mailbox_resp_queues[virt_core].waitq,
- gxp_power_state, memory_power_state, requested_aggressor,
+ gxp_power_state, memory_power_state, requested_low_clkmux,
client->mb_eventfds[virt_core]);
if (ret) {
dev_err(gxp->dev, "Failed to enqueue mailbox command (ret=%d)\n",
@@ -1254,6 +1266,13 @@ static int gxp_acquire_wake_lock_compat(
return -EINVAL;
}
+ if (ibuf.gxp_power_state == GXP_POWER_STATE_READY) {
+ dev_warn_once(
+ gxp->dev,
+ "GXP_POWER_STATE_READY is deprecated, please set GXP_POWER_LOW_FREQ_CLKMUX with GXP_POWER_STATE_UUD state");
+ ibuf.gxp_power_state = GXP_POWER_STATE_UUD;
+ }
+
down_write(&client->semaphore);
if ((ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) &&
(!client->vd)) {
@@ -1324,12 +1343,12 @@ static int gxp_acquire_wake_lock_compat(
}
gxp_pm_update_requested_power_states(
- gxp, client->requested_power_state, client->requested_aggressor,
- aur_state_array[ibuf.gxp_power_state], true,
+ gxp, client->requested_power_state, client->requested_low_clkmux,
+ aur_state_array[ibuf.gxp_power_state], false,
client->requested_memory_power_state,
aur_memory_state_array[ibuf.memory_power_state]);
client->requested_power_state = aur_state_array[ibuf.gxp_power_state];
- client->requested_aggressor = true;
+ client->requested_low_clkmux = false;
client->requested_memory_power_state =
aur_memory_state_array[ibuf.memory_power_state];
out:
@@ -1360,7 +1379,7 @@ static int gxp_acquire_wake_lock(struct gxp_client *client,
struct gxp_dev *gxp = client->gxp;
struct gxp_acquire_wakelock_ioctl ibuf;
bool acquired_block_wakelock = false;
- bool requested_aggressor = false;
+ bool requested_low_clkmux = false;
int ret = 0;
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
@@ -1385,6 +1404,18 @@ static int gxp_acquire_wake_lock(struct gxp_client *client,
return -EINVAL;
}
+ if (ibuf.gxp_power_state == GXP_POWER_STATE_READY) {
+ dev_warn_once(
+ gxp->dev,
+ "GXP_POWER_STATE_READY is deprecated, please set GXP_POWER_LOW_FREQ_CLKMUX with GXP_POWER_STATE_UUD state");
+ ibuf.gxp_power_state = GXP_POWER_STATE_UUD;
+ }
+
+ if(ibuf.flags & GXP_POWER_NON_AGGRESSOR)
+ dev_warn_once(
+ gxp->dev,
+ "GXP_POWER_NON_AGGRESSOR is deprecated, no operation here");
+
down_write(&client->semaphore);
if ((ibuf.components_to_wake & WAKELOCK_VIRTUAL_DEVICE) &&
(!client->vd)) {
@@ -1446,15 +1477,15 @@ static int gxp_acquire_wake_lock(struct gxp_client *client,
client->has_vd_wakelock = true;
}
- requested_aggressor = (ibuf.flags & GXP_POWER_NON_AGGRESSOR) == 0;
+ requested_low_clkmux = (ibuf.flags & GXP_POWER_LOW_FREQ_CLKMUX) != 0;
gxp_pm_update_requested_power_states(
- gxp, client->requested_power_state, client->requested_aggressor,
- aur_state_array[ibuf.gxp_power_state], requested_aggressor,
+ gxp, client->requested_power_state, client->requested_low_clkmux,
+ aur_state_array[ibuf.gxp_power_state], requested_low_clkmux,
client->requested_memory_power_state,
aur_memory_state_array[ibuf.memory_power_state]);
client->requested_power_state = aur_state_array[ibuf.gxp_power_state];
- client->requested_aggressor = requested_aggressor;
+ client->requested_low_clkmux = requested_low_clkmux;
client->requested_memory_power_state =
aur_memory_state_array[ibuf.memory_power_state];
out:
@@ -1536,12 +1567,12 @@ static int gxp_release_wake_lock(struct gxp_client *client, __u32 __user *argp)
*/
gxp_pm_update_requested_power_states(
gxp, client->requested_power_state,
- client->requested_aggressor, AUR_OFF, true,
+ client->requested_low_clkmux, AUR_OFF, false,
client->requested_memory_power_state,
AUR_MEM_UNDEFINED);
client->requested_power_state = AUR_OFF;
client->requested_memory_power_state = AUR_MEM_UNDEFINED;
-
+ client->requested_low_clkmux = false;
client->has_block_wakelock = false;
}
diff --git a/gxp-pm.c b/gxp-pm.c
index 2a7a4ab..8a501b7 100644
--- a/gxp-pm.c
+++ b/gxp-pm.c
@@ -93,7 +93,7 @@ static int gxp_pm_blkpwr_down(struct gxp_dev *gxp)
return ret;
}
-static int gxp_pm_blk_set_state_acpm(struct gxp_dev *gxp, unsigned long state, bool aggressor)
+static int gxp_pm_blk_set_state_acpm(struct gxp_dev *gxp, unsigned long state)
{
unsigned long rate;
@@ -104,8 +104,6 @@ static int gxp_pm_blk_set_state_acpm(struct gxp_dev *gxp, unsigned long state, b
gxp->dev,
"Requesting power state higher than current thermal limit (%lu)\n",
rate);
- if (!aggressor)
- rate |= BIT(AUR_NON_AGGRESSOR_BIT);
return gxp_pm_blk_set_rate_acpm(gxp, rate);
}
@@ -135,22 +133,27 @@ static void reset_cmu_mux_state(struct gxp_dev *gxp)
set_cmu_noc_user_mux_state(gxp, AUR_CMU_MUX_NORMAL);
}
-void gxp_pm_force_cmu_noc_user_mux_normal(struct gxp_dev *gxp)
+void gxp_pm_force_clkmux_normal(struct gxp_dev *gxp)
{
mutex_lock(&gxp->power_mgr->pm_lock);
- if (gxp->power_mgr->curr_state == AUR_READY)
+ if (gxp->power_mgr->curr_low_clkmux) {
+ set_cmu_pll_aur_mux_state(gxp, AUR_CMU_MUX_NORMAL);
set_cmu_noc_user_mux_state(gxp, AUR_CMU_MUX_NORMAL);
- gxp->power_mgr->force_noc_mux_normal_count++;
+ }
+ gxp->power_mgr->force_mux_normal_count++;
mutex_unlock(&gxp->power_mgr->pm_lock);
}
-void gxp_pm_check_cmu_noc_user_mux(struct gxp_dev *gxp)
+void gxp_pm_resume_clkmux(struct gxp_dev *gxp)
{
mutex_lock(&gxp->power_mgr->pm_lock);
- gxp->power_mgr->force_noc_mux_normal_count--;
- if (gxp->power_mgr->force_noc_mux_normal_count == 0)
- if (gxp->power_mgr->curr_state == AUR_READY)
+ gxp->power_mgr->force_mux_normal_count--;
+ if (gxp->power_mgr->force_mux_normal_count == 0) {
+ if (gxp->power_mgr->curr_low_clkmux) {
+ set_cmu_pll_aur_mux_state(gxp, AUR_CMU_MUX_LOW);
set_cmu_noc_user_mux_state(gxp, AUR_CMU_MUX_LOW);
+ }
+ }
mutex_unlock(&gxp->power_mgr->pm_lock);
}
@@ -158,29 +161,33 @@ static void gxp_pm_blk_set_state_acpm_async(struct work_struct *work)
{
struct gxp_set_acpm_state_work *set_acpm_state_work =
container_of(work, struct gxp_set_acpm_state_work, work);
+ struct gxp_dev *gxp = set_acpm_state_work->gxp;
+ struct gxp_power_manager *mgr = gxp->power_mgr;
+ bool scheduled_low_clkmux, prev_low_clkmux;
+ bool is_core_booting;
- mutex_lock(&set_acpm_state_work->gxp->power_mgr->pm_lock);
- if (set_acpm_state_work->gxp->power_mgr->curr_state == AUR_OFF)
+ mutex_lock(&mgr->pm_lock);
+ if (mgr->curr_state == AUR_OFF)
goto out;
- /*
- * This prev_state may be out of date with the manager's current state,
- * but we don't need curr_state here. curr_state is the last scheduled
- * state, while prev_state was the last actually requested state. It's
- * true because all request are executed synchronously and executed in
- * FIFO order.
- */
- if (set_acpm_state_work->prev_state == AUR_READY) {
- set_cmu_pll_aur_mux_state(set_acpm_state_work->gxp, AUR_CMU_MUX_NORMAL);
- set_cmu_noc_user_mux_state(set_acpm_state_work->gxp, AUR_CMU_MUX_NORMAL);
- } else if (set_acpm_state_work->state == AUR_READY) {
- set_cmu_pll_aur_mux_state(set_acpm_state_work->gxp, AUR_CMU_MUX_LOW);
- /* Switch NOC_USER mux to low state only if no core is starting the firmware */
- if (set_acpm_state_work->gxp->power_mgr->force_noc_mux_normal_count == 0)
- set_cmu_noc_user_mux_state(set_acpm_state_work->gxp, AUR_CMU_MUX_LOW);
+
+ scheduled_low_clkmux = set_acpm_state_work->low_clkmux;
+ prev_low_clkmux = set_acpm_state_work->prev_low_clkmux;
+ is_core_booting = mgr->force_mux_normal_count != 0;
+
+ /* Don't change clkmux states when any core is booting */
+ if (scheduled_low_clkmux != prev_low_clkmux && !is_core_booting) {
+ if (prev_low_clkmux) {
+ set_cmu_pll_aur_mux_state(gxp, AUR_CMU_MUX_NORMAL);
+ set_cmu_noc_user_mux_state(gxp, AUR_CMU_MUX_NORMAL);
+ } else if (scheduled_low_clkmux) {
+ set_cmu_pll_aur_mux_state(gxp, AUR_CMU_MUX_LOW);
+ set_cmu_noc_user_mux_state(gxp, AUR_CMU_MUX_LOW);
+ }
}
+ mgr->curr_low_clkmux = scheduled_low_clkmux;
+
gxp_pm_blk_set_state_acpm(set_acpm_state_work->gxp,
- set_acpm_state_work->state,
- set_acpm_state_work->aggressor_vote);
+ set_acpm_state_work->state);
out:
set_acpm_state_work->using = false;
mutex_unlock(&set_acpm_state_work->gxp->power_mgr->pm_lock);
@@ -207,8 +214,7 @@ int gxp_pm_blk_on(struct gxp_dev *gxp)
mutex_lock(&gxp->power_mgr->pm_lock);
ret = gxp_pm_blkpwr_up(gxp);
if (!ret) {
- gxp_pm_blk_set_state_acpm(gxp, AUR_INIT_DVFS_STATE,
- true /*aggressor*/);
+ gxp_pm_blk_set_state_acpm(gxp, AUR_INIT_DVFS_STATE);
gxp->power_mgr->curr_state = AUR_INIT_DVFS_STATE;
}
@@ -322,7 +328,7 @@ int gxp_pm_core_off(struct gxp_dev *gxp, uint core)
static int gxp_pm_req_state_locked(struct gxp_dev *gxp,
enum aur_power_state state,
- bool aggressor_vote)
+ bool low_clkmux_vote)
{
uint i;
@@ -335,56 +341,43 @@ static int gxp_pm_req_state_locked(struct gxp_dev *gxp,
"Cannot request power state when BLK is off\n");
return -EBUSY;
}
+ if (state == AUR_OFF)
+ return 0;
+retry:
if (state != gxp->power_mgr->curr_state ||
- aggressor_vote != gxp->power_mgr->curr_aggressor_vote) {
- if (state != AUR_OFF) {
- mutex_lock(&gxp->power_mgr->set_acpm_state_work_lock);
-
- for (i = 0; i < AUR_NUM_POWER_STATE_WORKER; i++) {
- if (!gxp->power_mgr->set_acpm_state_work[i]
- .using)
- break;
- }
- /* The workqueue is full, wait for it */
- if (i == AUR_NUM_POWER_STATE_WORKER) {
- dev_warn(
- gxp->dev,
- "The workqueue for power state transition is full");
- mutex_unlock(&gxp->power_mgr->pm_lock);
- flush_workqueue(gxp->power_mgr->wq);
- mutex_lock(&gxp->power_mgr->pm_lock);
-
- /* Verify that a request is still needed */
- if (state == gxp->power_mgr->curr_state &&
- aggressor_vote ==
- gxp->power_mgr->curr_aggressor_vote) {
- mutex_unlock(
- &gxp->power_mgr
- ->set_acpm_state_work_lock);
- return 0;
- }
-
- /*
- * All set_acpm_state_work should be available
- * now, pick the first one.
- */
- i = 0;
- }
- gxp->power_mgr->set_acpm_state_work[i].state = state;
- gxp->power_mgr->set_acpm_state_work[i].aggressor_vote =
- aggressor_vote;
- gxp->power_mgr->set_acpm_state_work[i].prev_state =
- gxp->power_mgr->curr_state;
- gxp->power_mgr->set_acpm_state_work[i].using = true;
- queue_work(
- gxp->power_mgr->wq,
- &gxp->power_mgr->set_acpm_state_work[i].work);
-
- gxp->power_mgr->curr_state = state;
- gxp->power_mgr->curr_aggressor_vote = aggressor_vote;
+ low_clkmux_vote != gxp->power_mgr->last_scheduled_low_clkmux) {
+ mutex_lock(&gxp->power_mgr->set_acpm_state_work_lock);
+ for (i = 0; i < AUR_NUM_POWER_STATE_WORKER; i++) {
+ if (!gxp->power_mgr->set_acpm_state_work[i].using)
+ break;
+ }
+ /* The workqueue is full, wait for it */
+ if (i == AUR_NUM_POWER_STATE_WORKER) {
+ dev_warn(
+ gxp->dev,
+ "The workqueue for power state transition is full");
mutex_unlock(&gxp->power_mgr->set_acpm_state_work_lock);
+ mutex_unlock(&gxp->power_mgr->pm_lock);
+ flush_workqueue(gxp->power_mgr->wq);
+ mutex_lock(&gxp->power_mgr->pm_lock);
+ goto retry;
}
+ gxp->power_mgr->set_acpm_state_work[i].state = state;
+ gxp->power_mgr->set_acpm_state_work[i].low_clkmux =
+ low_clkmux_vote;
+ gxp->power_mgr->set_acpm_state_work[i].prev_state =
+ gxp->power_mgr->curr_state;
+ gxp->power_mgr->set_acpm_state_work[i].prev_low_clkmux =
+ gxp->power_mgr->last_scheduled_low_clkmux;
+ gxp->power_mgr->set_acpm_state_work[i].using = true;
+ queue_work(gxp->power_mgr->wq,
+ &gxp->power_mgr->set_acpm_state_work[i].work);
+
+ gxp->power_mgr->curr_state = state;
+ gxp->power_mgr->last_scheduled_low_clkmux = low_clkmux_vote;
+
+ mutex_unlock(&gxp->power_mgr->set_acpm_state_work_lock);
}
return 0;
@@ -393,18 +386,18 @@ static int gxp_pm_req_state_locked(struct gxp_dev *gxp,
/* Caller must hold pm_lock */
static void gxp_pm_revoke_power_state_vote(struct gxp_dev *gxp,
enum aur_power_state revoked_state,
- bool origin_requested_aggressor)
+ bool origin_requested_low_clkmux)
{
unsigned int i;
uint *pwr_state_req_count;
if (revoked_state == AUR_OFF)
return;
- if (origin_requested_aggressor)
+ if (!origin_requested_low_clkmux)
pwr_state_req_count = gxp->power_mgr->pwr_state_req_count;
else
pwr_state_req_count =
- gxp->power_mgr->non_aggressor_pwr_state_req_count;
+ gxp->power_mgr->low_clkmux_pwr_state_req_count;
for (i = 0; i < AUR_NUM_POWER_STATE; i++) {
if (aur_state_array[i] == revoked_state) {
@@ -421,18 +414,18 @@ static void gxp_pm_revoke_power_state_vote(struct gxp_dev *gxp,
/* Caller must hold pm_lock */
static void gxp_pm_vote_power_state(struct gxp_dev *gxp,
enum aur_power_state state,
- bool requested_aggressor)
+ bool requested_low_clkmux)
{
unsigned int i;
uint *pwr_state_req_count;
if (state == AUR_OFF)
return;
- if (requested_aggressor)
+ if (!requested_low_clkmux)
pwr_state_req_count = gxp->power_mgr->pwr_state_req_count;
else
pwr_state_req_count =
- gxp->power_mgr->non_aggressor_pwr_state_req_count;
+ gxp->power_mgr->low_clkmux_pwr_state_req_count;
for (i = 0; i < AUR_NUM_POWER_STATE; i++) {
if (aur_state_array[i] == state) {
@@ -445,23 +438,26 @@ static void gxp_pm_vote_power_state(struct gxp_dev *gxp,
/* Caller must hold pm_lock */
static void gxp_pm_get_max_voted_power_state(struct gxp_dev *gxp,
unsigned long *state,
- bool *aggressor_vote)
+ bool *low_clkmux_vote)
{
int i;
*state = AUR_OFF;
for (i = AUR_NUM_POWER_STATE - 1; i >= 0; i--) {
if (gxp->power_mgr->pwr_state_req_count[i] > 0) {
- *aggressor_vote = true;
+ *low_clkmux_vote = false;
*state = aur_state_array[i];
break;
}
}
if (*state == AUR_OFF) {
- /* No aggressor vote, check non-aggressor vote counts */
- *aggressor_vote = false;
+ /*
+ * All votes requested with low frequency CLKMUX flag, check low
+ * frequency CLKMUX vote counts.
+ */
+ *low_clkmux_vote = true;
for (i = AUR_NUM_POWER_STATE - 1; i >= 0; i--) {
- if (gxp->power_mgr->non_aggressor_pwr_state_req_count[i] > 0) {
+ if (gxp->power_mgr->low_clkmux_pwr_state_req_count[i] > 0) {
*state = aur_state_array[i];
break;
}
@@ -471,12 +467,12 @@ static void gxp_pm_get_max_voted_power_state(struct gxp_dev *gxp,
static int gxp_pm_update_requested_power_state(
struct gxp_dev *gxp, enum aur_power_state origin_state,
- bool origin_requested_aggressor, enum aur_power_state requested_state,
- bool requested_aggressor)
+ bool origin_requested_low_clkmux, enum aur_power_state requested_state,
+ bool requested_low_clkmux)
{
int ret;
unsigned long max_state = AUR_OFF;
- bool aggressor_vote = false;
+ bool low_clkmux_vote = false;
lockdep_assert_held(&gxp->power_mgr->pm_lock);
if (gxp->power_mgr->curr_state == AUR_OFF &&
@@ -485,10 +481,10 @@ static int gxp_pm_update_requested_power_state(
"The client vote power state %d when BLK is off\n",
requested_state);
}
- gxp_pm_revoke_power_state_vote(gxp, origin_state, origin_requested_aggressor);
- gxp_pm_vote_power_state(gxp, requested_state, requested_aggressor);
- gxp_pm_get_max_voted_power_state(gxp, &max_state, &aggressor_vote);
- ret = gxp_pm_req_state_locked(gxp, max_state, aggressor_vote);
+ gxp_pm_revoke_power_state_vote(gxp, origin_state, origin_requested_low_clkmux);
+ gxp_pm_vote_power_state(gxp, requested_state, requested_low_clkmux);
+ gxp_pm_get_max_voted_power_state(gxp, &max_state, &low_clkmux_vote);
+ ret = gxp_pm_req_state_locked(gxp, max_state, low_clkmux_vote);
return ret;
}
@@ -528,6 +524,7 @@ static int gxp_pm_req_memory_state_locked(struct gxp_dev *gxp,
"Cannot request memory power state when BLK is off\n");
return -EBUSY;
}
+retry:
if (state != gxp->power_mgr->curr_memory_state) {
mutex_lock(&gxp->power_mgr->req_pm_qos_work_lock);
@@ -540,22 +537,11 @@ static int gxp_pm_req_memory_state_locked(struct gxp_dev *gxp,
dev_warn(
gxp->dev,
"The workqueue for memory power state transition is full");
+ mutex_unlock(&gxp->power_mgr->req_pm_qos_work_lock);
mutex_unlock(&gxp->power_mgr->pm_lock);
flush_workqueue(gxp->power_mgr->wq);
mutex_lock(&gxp->power_mgr->pm_lock);
-
- /* Verify that a request is still needed */
- if (state == gxp->power_mgr->curr_memory_state) {
- mutex_unlock(
- &gxp->power_mgr->req_pm_qos_work_lock);
- return 0;
- }
-
- /*
- * All req_pm_qos_work should be available
- * now, pick the first one.
- */
- i = 0;
+ goto retry;
}
gxp->power_mgr->curr_memory_state = state;
int_val = aur_memory_state2int_table[state];
@@ -643,18 +629,18 @@ static int gxp_pm_update_requested_memory_power_state(
int gxp_pm_update_requested_power_states(
struct gxp_dev *gxp, enum aur_power_state origin_state,
- bool origin_requested_aggressor, enum aur_power_state requested_state,
- bool requested_aggressor, enum aur_memory_power_state origin_mem_state,
+ bool origin_requested_low_clkmux, enum aur_power_state requested_state,
+ bool requested_low_clkmux, enum aur_memory_power_state origin_mem_state,
enum aur_memory_power_state requested_mem_state)
{
int ret = 0;
mutex_lock(&gxp->power_mgr->pm_lock);
if (origin_state != requested_state ||
- origin_requested_aggressor != requested_aggressor) {
+ origin_requested_low_clkmux != requested_low_clkmux) {
ret = gxp_pm_update_requested_power_state(
- gxp, origin_state, origin_requested_aggressor,
- requested_state, requested_aggressor);
+ gxp, origin_state, origin_requested_low_clkmux,
+ requested_state, requested_low_clkmux);
if (ret)
goto out;
}
@@ -678,7 +664,8 @@ int gxp_pm_init(struct gxp_dev *gxp)
mutex_init(&mgr->pm_lock);
mgr->curr_state = AUR_OFF;
mgr->curr_memory_state = AUR_MEM_UNDEFINED;
- mgr->curr_aggressor_vote = true;
+ mgr->curr_low_clkmux = false;
+ mgr->last_scheduled_low_clkmux = false;
mgr->ops = &gxp_aur_ops;
gxp->power_mgr = mgr;
for (i = 0; i < AUR_NUM_POWER_STATE_WORKER; i++) {
@@ -695,7 +682,7 @@ int gxp_pm_init(struct gxp_dev *gxp)
mutex_init(&mgr->req_pm_qos_work_lock);
gxp->power_mgr->wq =
create_singlethread_workqueue("gxp_power_work_queue");
- gxp->power_mgr->force_noc_mux_normal_count = 0;
+ gxp->power_mgr->force_mux_normal_count = 0;
gxp->power_mgr->blk_switch_count = 0l;
pm_runtime_enable(gxp->dev);
diff --git a/gxp-pm.h b/gxp-pm.h
index cd285c8..f9031ca 100644
--- a/gxp-pm.h
+++ b/gxp-pm.h
@@ -64,13 +64,6 @@ enum aur_power_cmu_mux_state {
#define AUR_MAX_ALLOW_STATE AUR_UD_PLUS
#define AUR_MAX_ALLOW_MEMORY_STATE AUR_MEM_MAX
-/*
- * The bit to indicate non-aggressor vote for `exynos_acpm_set_rate`.
- * Lower 3 byte of frequency parameter of `exynos_acpm_set_rate` will still be
- * the requested rate.
- */
-#define AUR_NON_AGGRESSOR_BIT 24
-
#define AUR_NUM_POWER_STATE_WORKER 16
struct gxp_pm_device_ops {
@@ -85,7 +78,8 @@ struct gxp_set_acpm_state_work {
struct gxp_dev *gxp;
unsigned long state;
unsigned long prev_state;
- bool aggressor_vote;
+ bool low_clkmux;
+ bool prev_low_clkmux;
bool using;
};
@@ -101,9 +95,17 @@ struct gxp_power_manager {
struct gxp_dev *gxp;
struct mutex pm_lock;
uint pwr_state_req_count[AUR_NUM_POWER_STATE];
- uint non_aggressor_pwr_state_req_count[AUR_NUM_POWER_STATE];
+ uint low_clkmux_pwr_state_req_count[AUR_NUM_POWER_STATE];
uint mem_pwr_state_req_count[AUR_NUM_MEMORY_POWER_STATE];
- bool curr_aggressor_vote;
+ /*
+ * Last set CLKMUX state by asynchronous request handler.
+ * If a core is booting, we shouldn't change clock mux state. This is
+ * the expected state to set after all cores booting are finished.
+ * Otherwise, it's the real state of CLKMUX.
+ */
+ bool curr_low_clkmux;
+ /* Last requested clock mux state */
+ bool last_scheduled_low_clkmux;
int curr_state;
int curr_memory_state;
struct gxp_pm_device_ops *ops;
@@ -118,7 +120,7 @@ struct gxp_power_manager {
/* INT/MIF requests for memory bandwidth */
struct exynos_pm_qos_request int_min;
struct exynos_pm_qos_request mif_min;
- int force_noc_mux_normal_count;
+ int force_mux_normal_count;
/* Max frequency that the thermal driver/ACPM will allow in Hz */
unsigned long thermal_limit;
u64 blk_switch_count;
@@ -237,12 +239,12 @@ int gxp_pm_blk_get_state_acpm(struct gxp_dev *gxp);
* @gxp: The GXP device to operate.
* @origin_state: An existing old requested state, will be cleared. If this is
* the first vote, pass AUR_OFF.
- * @origin_requested_aggressor: Specify whether the existing vote was requested with
- * aggressor flag.
+ * @origin_requested_low_clkmux: Specify whether the existing vote was requested with
+ * low frequency CLKMUX flag.
* @requested_state: The new requested state.
- * @requested_aggressor: Specify whether the new vote is requested with aggressor
- * flag. Will take no effect if the @requested state is
- * AUR_OFF.
+ * @requested_low_clkmux: Specify whether the new vote is requested with low frequency
+ * CLKMUX flag. Will take no effect if the @requested state is
+ * AUR_OFF.
* @origin_mem_state: An existing old requested state, will be cleared. If this is
* the first vote, pass AUR_MEM_UNDEFINED.
* @requested_mem_state: The new requested state.
@@ -254,23 +256,23 @@ int gxp_pm_blk_get_state_acpm(struct gxp_dev *gxp);
int gxp_pm_update_requested_power_states(
struct gxp_dev *gxp, enum aur_power_state origin_state,
- bool origin_requested_aggressor, enum aur_power_state requested_state,
- bool requested_aggressor, enum aur_memory_power_state origin_mem_state,
+ bool origin_requested_low_clkmux, enum aur_power_state requested_state,
+ bool requested_low_clkmux, enum aur_memory_power_state origin_mem_state,
enum aur_memory_power_state requested_mem_state);
/*
- * gxp_pm_force_cmu_noc_user_mux_normal() - Force PLL_CON0_NOC_USER MUX switch to the
- * normal state. This is required to guarantee LPM works when the core is starting the
- * firmware.
+ * gxp_pm_force_clkmux_normal() - Force PLL_CON0_NOC_USER and PLL_CON0_PLL_AUR MUX
+ * switch to the normal state. This is required to guarantee LPM works when the core
+ * is starting the firmware.
*/
-void gxp_pm_force_cmu_noc_user_mux_normal(struct gxp_dev *gxp);
+void gxp_pm_force_clkmux_normal(struct gxp_dev *gxp);
/*
- * gxp_pm_check_cmu_noc_user_mux() - Check PLL_CON0_NOC_USER MUX state modified
- * by gxp_pm_force_cmu_noc_user_mux_normal(). If the requested state is
- * AUR_READY, should set it to AUR_CMU_MUX_LOW.
+ * gxp_pm_resume_clkmux() - Check PLL_CON0_NOC_USER and PLL_CON0_PLL_AUR MUX state
+ * modified by gxp_pm_force_clkmux_normal(). If the current vote is requested with low
+ * frequency CLKMUX flag, should set the MUX state to AUR_CMU_MUX_LOW.
*/
-void gxp_pm_check_cmu_noc_user_mux(struct gxp_dev *gxp);
+void gxp_pm_resume_clkmux(struct gxp_dev *gxp);
/**
* gxp_pm_set_thermal_limit() - Notify the power manager of a thermal limit
diff --git a/gxp-vd.c b/gxp-vd.c
index ad08207..c22e30d 100644
--- a/gxp-vd.c
+++ b/gxp-vd.c
@@ -264,57 +264,37 @@ int gxp_vd_start(struct gxp_virtual_device *vd)
gxp_dma_map_core_resources(gxp, vd, virt_core, core);
map_telemetry_buffers(gxp, vd, virt_core, core);
map_debug_dump_buffer(gxp, vd, virt_core, core);
- ret = gxp_firmware_run(gxp, vd, virt_core, core);
- if (ret) {
- dev_err(gxp->dev, "Failed to run firmware on core %u\n",
- core);
- /*
- * out_vd_stop will only clean up the cores that
- * had their firmware start successfully, so we
- * need to clean up `core` here.
- */
- unmap_debug_dump_buffer(gxp, vd, virt_core,
- core);
- unmap_telemetry_buffers(gxp, vd, virt_core,
- core);
- gxp_dma_unmap_core_resources(gxp, vd, virt_core,
- core);
- gxp_dma_domain_detach_device(gxp, vd,
- virt_core);
- gxp->core_to_vd[core] = NULL;
- goto out_vd_stop;
- }
virt_core++;
}
}
- if (cores_remaining != 0) {
- dev_err(gxp->dev,
- "Internal error: Failed to start %u requested cores. %u cores remaining\n",
- vd->num_cores, cores_remaining);
- /*
- * Should never reach here. Previously verified that enough
- * cores are available.
- */
- WARN_ON(true);
- ret = -EIO;
- goto out_vd_stop;
- }
- vd->state = GXP_VD_RUNNING;
+ ret = gxp_firmware_run(gxp, vd, core_list);
+ if (ret)
+ goto error;
+ vd->state = GXP_VD_RUNNING;
return ret;
-out_vd_stop:
- gxp_vd_stop(vd);
+error:
+ virt_core = 0;
+ for (core = 0; core < GXP_NUM_CORES; core++) {
+ if (core_list & BIT(core)) {
+ unmap_debug_dump_buffer(gxp, vd, virt_core, core);
+ unmap_telemetry_buffers(gxp, vd, virt_core, core);
+ gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
+ gxp_dma_domain_detach_device(gxp, vd, virt_core);
+ gxp->core_to_vd[core] = NULL;
+ virt_core++;
+ }
+ }
return ret;
-
}
/* Caller must hold gxp->vd_semaphore for writing */
void gxp_vd_stop(struct gxp_virtual_device *vd)
{
struct gxp_dev *gxp = vd->gxp;
- uint core;
+ uint core, core_list = 0;
uint virt_core = 0;
uint lpm_state;
@@ -332,9 +312,14 @@ void gxp_vd_stop(struct gxp_virtual_device *vd)
}
}
+ for (core = 0; core < GXP_NUM_CORES; core++)
+ if (gxp->core_to_vd[core] == vd)
+ core_list |= BIT(core);
+
+ gxp_firmware_stop(gxp, vd, core_list);
+
for (core = 0; core < GXP_NUM_CORES; core++) {
if (gxp->core_to_vd[core] == vd) {
- gxp_firmware_stop(gxp, vd, virt_core, core);
unmap_debug_dump_buffer(gxp, vd, virt_core, core);
unmap_telemetry_buffers(gxp, vd, virt_core, core);
gxp_dma_unmap_core_resources(gxp, vd, virt_core, core);
@@ -369,7 +354,7 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd)
"Attempt to suspend a virtual device twice\n");
return;
}
- gxp_pm_force_cmu_noc_user_mux_normal(gxp);
+ gxp_pm_force_clkmux_normal(gxp);
/*
* Start the suspend process for all of this VD's cores without waiting
* for completion.
@@ -432,7 +417,7 @@ void gxp_vd_suspend(struct gxp_virtual_device *vd)
gxp_pm_get_blk_switch_count(gxp);
vd->state = GXP_VD_SUSPENDED;
}
- gxp_pm_check_cmu_noc_user_mux(gxp);
+ gxp_pm_resume_clkmux(gxp);
}
/*
@@ -456,7 +441,7 @@ int gxp_vd_resume(struct gxp_virtual_device *vd)
"Attempt to resume a virtual device which was not suspended\n");
return -EBUSY;
}
- gxp_pm_force_cmu_noc_user_mux_normal(gxp);
+ gxp_pm_force_clkmux_normal(gxp);
curr_blk_switch_count = gxp_pm_get_blk_switch_count(gxp);
/*
* Start the resume process for all of this VD's cores without waiting
@@ -533,7 +518,7 @@ int gxp_vd_resume(struct gxp_virtual_device *vd)
} else {
vd->state = GXP_VD_RUNNING;
}
- gxp_pm_check_cmu_noc_user_mux(gxp);
+ gxp_pm_resume_clkmux(gxp);
return ret;
}
diff --git a/gxp.h b/gxp.h
index 0582236..ed0a78d 100644
--- a/gxp.h
+++ b/gxp.h
@@ -116,9 +116,10 @@ struct gxp_virtual_device_ioctl {
/*
* DSP subsystem Power state values for use as `gxp_power_state` in
* `struct gxp_acquire_wakelock_ioctl`.
- * Note: GXP_POWER_STATE_READY is the state to keep the BLOCK idle. By setting
- * this state, the driver will request UUD frequency and switch the CMUMUX
- * clocks into 25 MHz to save more power.
+ * Note: GXP_POWER_STATE_READY is a deprecated state. The way to achieve the
+ * original behavior is to request GXP_POWER_STATE_UUD with the
+ * GXP_POWER_LOW_FREQ_CLKMUX flag. Requesting GXP_POWER_STATE_READY is treated
+ * as identical to GXP_POWER_STATE_UUD.
*/
#define GXP_POWER_STATE_OFF 0
#define GXP_POWER_STATE_UUD 1
@@ -146,14 +147,24 @@ struct gxp_virtual_device_ioctl {
/*
* GXP power flag macros, supported by `flags` in `gxp_acquire_wakelock_ioctl`
* and `power_flags in `gxp_mailbox_command_ioctl`.
- * The client can request non-aggressor vote by this flag, which means if the
- * requested voltage is lower than the current voltage of VDD_CAM, adopt the
- * current voltage of VDD_CAM for DSP. On the other hand, if the requested
- * voltage is higher, adopt the requested one for DSP.
*
- * Note: aggressor votes will have higher priority than non-aggressor votes.
+ * Non-aggressor flag is deprecated. Setting this flag is a no-op since
+ * non-aggressor support is defeatured.
*/
#define GXP_POWER_NON_AGGRESSOR (1 << 0)
+/*
+ * The client can request low frequency clkmux vote by this flag, which means
+ * the kernel driver will switch the CLKMUX clocks to save more power.
+ *
+ * Note: The kernel driver keeps separate track of low frequency clkmux votes
+ * and normal votes, and the low frequency clkmux votes will have lower priority
+ * than all normal votes.
+ * For example, if the kernel driver has two votes, one is GXP_POWER_STATE_UUD
+ * without GXP_POWER_LOW_FREQ_CLKMUX, and the other one is GXP_POWER_STATE_NOM
+ * with GXP_POWER_LOW_FREQ_CLKMUX. The voting result is GXP_POWER_STATE_UUD
+ * without GXP_POWER_LOW_FREQ_CLKMUX.
+ */
+#define GXP_POWER_LOW_FREQ_CLKMUX (1 << 1)
struct gxp_acquire_wakelock_ioctl {
/*
@@ -199,22 +210,11 @@ struct gxp_acquire_wakelock_ioctl {
* Set RESERVED bits to 0 to ensure backwards compatibility.
*
* Bitfields:
- * [0:0] - NON_AGGRESSOR setting for ACPM:
- * 0 = AGGRESSOR, default value
- * 1 = NON_AGGRESSOR
- * If the client makes a NON_AGGRESSOR request, the DSP is
- * only guaranteed to operate at `gxp_power_state` when it
- * is the only component active on its voltage rail. If
- * another component becomes active on the rail, at any
- * point while a NON_AGGRESSOR request is active, the rail
- * will defer to the other component's requested state.
- *
- * Note: An AGGRESSOR request from any client overrides all
- * NON_AGGRESSOR requests. At that point, the DSP will
- * operate at the AGGRESSOR request's `gxp_power_state`,
- * regardless of other components on the DSP's rail or what
- * power state any NON_AGGRESSOR requests specified.
- * [31:1] - RESERVED
+ * [0:0] - Deprecated, do not use
+ * [1:1] - LOW_FREQ_CLKMUX setting for power management
+ * 0 = Don't switch CLKMUX clocks, default value
+ * 1 = Switch CLKMUX clocks
+ * [31:2] - RESERVED
*/
__u32 flags;
};
@@ -483,12 +483,11 @@ struct gxp_mailbox_command_ioctl {
* Set RESERVED bits to 0 to ensure backwards compatibility.
*
* Bitfields:
- * [0:0] - NON_AGGRESSOR setting for ACPM:
- * 0 = AGGRESSOR, default value
- * 1 = NON_AGGRESSOR
- * Note: It takes effect only if every client holds a
- * wakelock with NON_AGGRESSOR.
- * [31:1] - RESERVED
+ * [0:0] - Deprecated, do not use
+ * [1:1] - LOW_FREQ_CLKMUX setting for power management
+ * 0 = Don't switch CLKMUX clocks, default value
+ * 1 = Switch CLKMUX clocks
+ * [31:2] - RESERVED
*/
__u32 power_flags;
};