summaryrefslogtreecommitdiff
path: root/mali_kbase/backend/gpu/mali_kbase_pm_policy.c
diff options
context:
space:
mode:
Diffstat (limited to 'mali_kbase/backend/gpu/mali_kbase_pm_policy.c')
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_policy.c | 72
1 file changed, 55 insertions(+), 17 deletions(-)
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_policy.c b/mali_kbase/backend/gpu/mali_kbase_pm_policy.c
index cb38c6e..7d7650c 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_policy.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_policy.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
- * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -54,7 +54,9 @@ void kbase_pm_policy_init(struct kbase_device *kbdev)
unsigned long flags;
int i;
- if (of_property_read_string(np, "power_policy", &power_policy_name) == 0) {
+ /* Read "power-policy" property and fallback to "power_policy" if not found */
+ if ((of_property_read_string(np, "power-policy", &power_policy_name) == 0) ||
+ (of_property_read_string(np, "power_policy", &power_policy_name) == 0)) {
for (i = 0; i < ARRAY_SIZE(all_policy_list); i++)
if (sysfs_streq(all_policy_list[i]->name, power_policy_name)) {
default_policy = all_policy_list[i];
@@ -117,10 +119,12 @@ void kbase_pm_update_active(struct kbase_device *kbdev)
} else {
/* Cancel the invocation of
* kbase_pm_gpu_poweroff_wait_wq() from the L2 state
- * machine. This is safe - it
+ * machine. This is safe - if
* invoke_poweroff_wait_wq_when_l2_off is true, then
* the poweroff work hasn't even been queued yet,
- * meaning we can go straight to powering on.
+ * meaning we can go straight to powering on. We must
+ * however wake_up(poweroff_wait) in case someone was
+ * waiting for poweroff_wait_in_progress to become false.
*/
pm->backend.invoke_poweroff_wait_wq_when_l2_off = false;
pm->backend.poweroff_wait_in_progress = false;
@@ -130,6 +134,7 @@ void kbase_pm_update_active(struct kbase_device *kbdev)
#endif
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ wake_up(&kbdev->pm.backend.poweroff_wait);
kbase_pm_do_poweron(kbdev, false);
}
} else {
@@ -293,6 +298,10 @@ void kbase_pm_set_policy(struct kbase_device *kbdev,
unsigned int new_policy_csf_pm_sched_flags;
bool sched_suspend;
bool reset_gpu = false;
+ bool reset_op_prevented = true;
+ struct kbase_csf_scheduler *scheduler = NULL;
+ u32 pwroff;
+ bool switching_to_always_on;
#endif
KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -301,9 +310,33 @@ void kbase_pm_set_policy(struct kbase_device *kbdev,
KBASE_KTRACE_ADD(kbdev, PM_SET_POLICY, NULL, new_policy->id);
#if MALI_USE_CSF
+ pwroff = kbase_csf_firmware_get_mcu_core_pwroff_time(kbdev);
+ switching_to_always_on = new_policy == &kbase_pm_always_on_policy_ops;
+ if (pwroff == 0 && !switching_to_always_on) {
+ dev_warn(kbdev->dev,
+ "power_policy: cannot switch away from always_on with mcu_shader_pwroff_timeout set to 0\n");
+ dev_warn(kbdev->dev,
+ "power_policy: resetting mcu_shader_pwroff_timeout to default value to switch policy from always_on\n");
+ kbase_csf_firmware_reset_mcu_core_pwroff_time(kbdev);
+ }
+
+ scheduler = &kbdev->csf.scheduler;
+ KBASE_DEBUG_ASSERT(scheduler != NULL);
+
/* Serialize calls on kbase_pm_set_policy() */
mutex_lock(&kbdev->pm.backend.policy_change_lock);
+ if (kbase_reset_gpu_prevent_and_wait(kbdev)) {
+ dev_warn(kbdev->dev, "Set PM policy failing to prevent gpu reset");
+ reset_op_prevented = false;
+ }
+
+ /* In case of CSF, the scheduler may be invoked to suspend. In that
+ * case, there is a risk that the L2 may be turned on by the time we
+ * check it here. So we hold the scheduler lock to avoid other operations
+ * interfering with the policy change and vice versa.
+ */
+ rt_mutex_lock(&scheduler->lock);
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
/* policy_change_clamp_state_to_off, when needed, is set/cleared in
* this function, a very limited temporal scope for covering the
@@ -316,23 +349,22 @@ void kbase_pm_set_policy(struct kbase_device *kbdev,
* the always_on policy, reflected by the CSF_DYNAMIC_PM_CORE_KEEP_ON
* flag bit.
*/
- sched_suspend = kbdev->csf.firmware_inited &&
+ sched_suspend = reset_op_prevented &&
(CSF_DYNAMIC_PM_CORE_KEEP_ON &
- (new_policy_csf_pm_sched_flags |
- kbdev->pm.backend.csf_pm_sched_flags));
+ (new_policy_csf_pm_sched_flags | kbdev->pm.backend.csf_pm_sched_flags));
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- if (sched_suspend)
- kbase_csf_scheduler_pm_suspend(kbdev);
+ if (sched_suspend) {
+ /* Update the suspend flag to reflect actually suspend being done ! */
+ sched_suspend = !kbase_csf_scheduler_pm_suspend_no_lock(kbdev);
+ /* Set the reset recovery flag if the required suspend failed */
+ reset_gpu = !sched_suspend;
+ }
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- /* If the current active policy is always_on, one needs to clamp the
- * MCU/L2 for reaching off-state
- */
- if (sched_suspend)
- kbdev->pm.backend.policy_change_clamp_state_to_off =
- CSF_DYNAMIC_PM_CORE_KEEP_ON & kbdev->pm.backend.csf_pm_sched_flags;
+
+ kbdev->pm.backend.policy_change_clamp_state_to_off = sched_suspend;
kbase_pm_update_state(kbdev);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
@@ -392,13 +424,19 @@ void kbase_pm_set_policy(struct kbase_device *kbdev,
#if MALI_USE_CSF
/* Reverse the suspension done */
+ if (sched_suspend)
+ kbase_csf_scheduler_pm_resume_no_lock(kbdev);
+ rt_mutex_unlock(&scheduler->lock);
+
+ if (reset_op_prevented)
+ kbase_reset_gpu_allow(kbdev);
+
if (reset_gpu) {
dev_warn(kbdev->dev, "Resorting to GPU reset for policy change\n");
if (kbase_prepare_to_reset_gpu(kbdev, RESET_FLAGS_NONE))
kbase_reset_gpu(kbdev);
kbase_reset_gpu_wait(kbdev);
- } else if (sched_suspend)
- kbase_csf_scheduler_pm_resume(kbdev);
+ }
mutex_unlock(&kbdev->pm.backend.policy_change_lock);
#endif