summary | refs | log | tree | commit | diff
path: root/mali_kbase/device/backend/mali_kbase_device_hw_csf.c
diff options
context:
space:
mode:
Diffstat (limited to 'mali_kbase/device/backend/mali_kbase_device_hw_csf.c')
-rw-r--r-- mali_kbase/device/backend/mali_kbase_device_hw_csf.c | 70
1 files changed, 61 insertions, 9 deletions
diff --git a/mali_kbase/device/backend/mali_kbase_device_hw_csf.c b/mali_kbase/device/backend/mali_kbase_device_hw_csf.c
index e2228ca..c837f5a 100644
--- a/mali_kbase/device/backend/mali_kbase_device_hw_csf.c
+++ b/mali_kbase/device/backend/mali_kbase_device_hw_csf.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
- * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2020-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -24,6 +24,7 @@
#include <backend/gpu/mali_kbase_instr_internal.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <device/mali_kbase_device.h>
+#include <device/mali_kbase_device_internal.h>
#include <mali_kbase_reset_gpu.h>
#include <mmu/mali_kbase_mmu.h>
#include <mali_kbase_ctx_sched.h>
@@ -57,7 +58,7 @@ static void kbase_gpu_fault_interrupt(struct kbase_device *kbdev)
{
const u32 status = kbase_reg_read(kbdev,
GPU_CONTROL_REG(GPU_FAULTSTATUS));
- const bool as_valid = status & GPU_FAULTSTATUS_JASID_VALID_FLAG;
+ const bool as_valid = status & GPU_FAULTSTATUS_JASID_VALID_MASK;
const u32 as_nr = (status & GPU_FAULTSTATUS_JASID_MASK) >>
GPU_FAULTSTATUS_JASID_SHIFT;
bool bus_fault = (status & GPU_FAULTSTATUS_EXCEPTION_TYPE_MASK) ==
@@ -83,6 +84,37 @@ static void kbase_gpu_fault_interrupt(struct kbase_device *kbdev)
}
+#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
+/* When the GLB_PWROFF_TIMER expires, FW will write the SHADER_PWROFF register, this sequence
+ * follows:
+ * - SHADER_PWRTRANS goes high
+ * - SHADER_READY goes low
+ * - Iterator is told not to send any more work to the core
+ * - Wait for the core to drain
+ * - SHADER_PWRACTIVE goes low
+ * - Do an IPA sample
+ * - Flush the core
+ * - Apply functional isolation
+ * - Turn the clock off
+ * - Put the core in reset
+ * - Apply electrical isolation
+ * - Power off the core
+ * - SHADER_PWRTRANS goes low
+ *
+ * It's therefore safe to turn off the SC rail when:
+ * - SHADER_READY == 0, this means the SC's last transitioned to OFF
+ * - SHADER_PWRTRANS == 0, this means the SC's have finished transitioning
+ */
+static bool safe_to_turn_off_sc_rail(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ return (kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_READY_HI)) ||
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_READY_LO)) ||
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_PWRTRANS_HI)) ||
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_PWRTRANS_LO))) == 0;
+}
+#endif /* CONFIG_MALI_HOST_CONTROLS_SC_RAILS */
+
void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
{
KBASE_KTRACE_ADD(kbdev, CORE_GPU_IRQ, NULL, val);
@@ -115,6 +147,9 @@ void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
GPU_EXCEPTION_TYPE_SW_FAULT_0,
} } };
+ kbase_debug_csf_fault_notify(kbdev, scheduler->active_protm_grp->kctx,
+ DF_GPU_PROTECTED_FAULT);
+
scheduler->active_protm_grp->faulted = true;
kbase_csf_add_group_fatal_error(
scheduler->active_protm_grp, &err_payload);
@@ -146,7 +181,6 @@ void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
dev_dbg(kbdev->dev, "Doorbell mirror interrupt received");
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- WARN_ON(!kbase_csf_scheduler_get_nr_active_csgs(kbdev));
kbase_pm_disable_db_mirror_interrupt(kbdev);
kbdev->pm.backend.exit_gpu_sleep_mode = true;
kbase_csf_scheduler_invoke_tick(kbdev);
@@ -166,6 +200,16 @@ void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
if (val & CLEAN_CACHES_COMPLETED)
kbase_clean_caches_done(kbdev);
+#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
+ if (val & POWER_CHANGED_ALL) {
+ unsigned long flags;
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbdev->pm.backend.sc_pwroff_safe = safe_to_turn_off_sc_rail(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ }
+#endif
+
+
if (val & (POWER_CHANGED_ALL | MCU_STATUS_GPU_IRQ)) {
kbase_pm_power_changed(kbdev);
} else if (val & CLEAN_CACHES_COMPLETED) {
@@ -184,7 +228,7 @@ void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
}
#if !IS_ENABLED(CONFIG_MALI_NO_MALI)
-static bool kbase_is_register_accessible(u32 offset)
+bool kbase_is_register_accessible(u32 offset)
{
#ifdef CONFIG_MALI_DEBUG
if (((offset >= MCU_SUBSYSTEM_BASE) && (offset < IPA_CONTROL_BASE)) ||
@@ -196,11 +240,16 @@ static bool kbase_is_register_accessible(u32 offset)
return true;
}
+#endif /* !IS_ENABLED(CONFIG_MALI_NO_MALI) */
+#if IS_ENABLED(CONFIG_MALI_REAL_HW)
void kbase_reg_write(struct kbase_device *kbdev, u32 offset, u32 value)
{
- KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
- KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
+ if (WARN_ON(!kbdev->pm.backend.gpu_powered))
+ return;
+
+ if (WARN_ON(kbdev->dev == NULL))
+ return;
if (!kbase_is_register_accessible(offset))
return;
@@ -220,8 +269,11 @@ u32 kbase_reg_read(struct kbase_device *kbdev, u32 offset)
{
u32 val;
- KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
- KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
+ if (WARN_ON(!kbdev->pm.backend.gpu_powered))
+ return 0;
+
+ if (WARN_ON(kbdev->dev == NULL))
+ return 0;
if (!kbase_is_register_accessible(offset))
return 0;
@@ -238,4 +290,4 @@ u32 kbase_reg_read(struct kbase_device *kbdev, u32 offset)
return val;
}
KBASE_EXPORT_TEST_API(kbase_reg_read);
-#endif /* !IS_ENABLED(CONFIG_MALI_NO_MALI) */
+#endif /* IS_ENABLED(CONFIG_MALI_REAL_HW) */