Diffstat (limited to 'mali_kbase/mali_kbase_ctx_sched.c')

-rw-r--r--  mali_kbase/mali_kbase_ctx_sched.c | 69 +++++++++++++++++++++---------
1 file changed, 48 insertions(+), 21 deletions(-)
diff --git a/mali_kbase/mali_kbase_ctx_sched.c b/mali_kbase/mali_kbase_ctx_sched.c
index 8026e7f..ea4f300 100644
--- a/mali_kbase/mali_kbase_ctx_sched.c
+++ b/mali_kbase/mali_kbase_ctx_sched.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
- * (C) COPYRIGHT 2017-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2017-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
@@ -23,7 +23,9 @@
#include <mali_kbase_defs.h>
#include "mali_kbase_ctx_sched.h"
#include "tl/mali_kbase_tracepoints.h"
-#if !MALI_USE_CSF
+#if MALI_USE_CSF
+#include "mali_kbase_reset_gpu.h"
+#else
#include <mali_kbase_hwaccess_jm.h>
#endif
@@ -67,6 +69,12 @@ void kbase_ctx_sched_term(struct kbase_device *kbdev)
	}
}

+void kbase_ctx_sched_init_ctx(struct kbase_context *kctx)
+{
+	kctx->as_nr = KBASEP_AS_NR_INVALID;
+	atomic_set(&kctx->refcount, 0);
+}
+
/* kbasep_ctx_sched_find_as_for_ctx - Find a free address space
 *
 * @kctx: The context for which to find a free address space
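
The new kbase_ctx_sched_init_ctx() hook gives every context a well-defined starting state: no address space bound and a refcount of zero. Below is a minimal standalone model of that invariant, using C11 atomics in place of the kernel's atomic_t; the names are illustrative, not the driver's.

#include <stdatomic.h>

#define AS_NR_INVALID (-1)

struct ctx {
	int as_nr;           /* assigned address-space slot, or AS_NR_INVALID */
	atomic_int refcount; /* number of active users of that assignment */
};

/* Every context starts unbound: no AS slot held, nobody referencing it. */
static void ctx_sched_init_ctx(struct ctx *c)
{
	c->as_nr = AS_NR_INVALID;
	atomic_store(&c->refcount, 0);
}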
@@ -111,7 +119,7 @@ int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx)
	if (atomic_inc_return(&kctx->refcount) == 1) {
		int const free_as = kbasep_ctx_sched_find_as_for_ctx(kctx);

-		if (free_as != KBASEP_AS_NR_INVALID) {
+		if (free_as >= 0) {
			kbdev->as_free &= ~(1u << free_as);
			/* Only program the MMU if the context has not been
			 * assigned the same address space before.
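
Switching the success test from "free_as != KBASEP_AS_NR_INVALID" to "free_as >= 0" treats every negative return as an allocation failure and guarantees free_as is non-negative before it is used as a shift count. A compilable sketch of the same bitmask-allocation pattern, with userspace types standing in for the driver's:

#include <stdint.h>

/* Return the index of a set bit in the free mask, or -1 if none. */
static int find_free_as(uint32_t as_free)
{
	int i;

	for (i = 0; i < 32; i++)
		if (as_free & (1u << i))
			return i;
	return -1;
}

static int claim_as(uint32_t *as_free)
{
	int free_as = find_free_as(*as_free);

	/* ">= 0" rejects every failure encoding, not just one sentinel,
	 * and makes the shift below well defined.
	 */
	if (free_as >= 0)
		*as_free &= ~(1u << free_as);
	return free_as;
}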
@@ -152,9 +160,23 @@ void kbase_ctx_sched_retain_ctx_refcount(struct kbase_context *kctx)
	struct kbase_device *const kbdev = kctx->kbdev;

	lockdep_assert_held(&kbdev->hwaccess_lock);

-	WARN_ON(atomic_read(&kctx->refcount) == 0);
-	WARN_ON(kctx->as_nr == KBASEP_AS_NR_INVALID);
-	WARN_ON(kbdev->as_to_kctx[kctx->as_nr] != kctx);
+#if MALI_USE_CSF
+	/* We expect the context to be active when this function is called,
+	 * except for the case where a page fault is reported for it during
+	 * the GPU reset sequence, in which case we can expect the refcount
+	 * to be 0.
+	 */
+	WARN_ON(!atomic_read(&kctx->refcount) && !kbase_reset_gpu_is_active(kbdev));
+#else
+	/* We expect the context to be active (and thus refcount should be non-zero)
+	 * when this function is called
+	 */
+	WARN_ON(!atomic_read(&kctx->refcount));
+#endif
+	if (likely((kctx->as_nr >= 0) && (kctx->as_nr < BASE_MAX_NR_AS)))
+		WARN_ON(kbdev->as_to_kctx[kctx->as_nr] != kctx);
+	else
+		WARN(true, "Invalid as_nr(%d)", kctx->as_nr);
	atomic_inc(&kctx->refcount);
}
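
The reworked assertions validate as_nr before it is used to index as_to_kctx[], so a stale or corrupted value triggers a warning instead of an out-of-bounds read. The shape of the check, as a self-contained sketch (MAX_NR_AS and the table are stand-ins for the driver's BASE_MAX_NR_AS and kbdev->as_to_kctx):

#include <stdio.h>

#define MAX_NR_AS 16

struct ctx;

/* Range-check the index first; only then dereference the table. */
static struct ctx *lookup_as(struct ctx *table[MAX_NR_AS], int as_nr)
{
	if (as_nr >= 0 && as_nr < MAX_NR_AS)
		return table[as_nr];

	fprintf(stderr, "Invalid as_nr(%d)\n", as_nr); /* stands in for WARN() */
	return NULL;
}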
@@ -168,16 +190,17 @@ void kbase_ctx_sched_release_ctx(struct kbase_context *kctx)
	new_ref_count = atomic_dec_return(&kctx->refcount);
	if (new_ref_count == 0) {
-		kbdev->as_free |= (1u << kctx->as_nr);
-		if (kbase_ctx_flag(kctx, KCTX_AS_DISABLED_ON_FAULT)) {
-			KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS(
-				kbdev, kctx->id);
-			kbdev->as_to_kctx[kctx->as_nr] = NULL;
-			kctx->as_nr = KBASEP_AS_NR_INVALID;
-			kbase_ctx_flag_clear(kctx, KCTX_AS_DISABLED_ON_FAULT);
+		if (likely((kctx->as_nr >= 0) && (kctx->as_nr < BASE_MAX_NR_AS))) {
+			kbdev->as_free |= (1u << kctx->as_nr);
+			if (kbase_ctx_flag(kctx, KCTX_AS_DISABLED_ON_FAULT)) {
+				KBASE_TLSTREAM_TL_KBASE_CTX_UNASSIGN_AS(kbdev, kctx->id);
+				kbdev->as_to_kctx[kctx->as_nr] = NULL;
+				kctx->as_nr = KBASEP_AS_NR_INVALID;
+				kbase_ctx_flag_clear(kctx, KCTX_AS_DISABLED_ON_FAULT);
#if !MALI_USE_CSF
-			kbase_backend_slot_kctx_purge_locked(kbdev, kctx);
+				kbase_backend_slot_kctx_purge_locked(kbdev, kctx);
#endif
+			}
		}
	}
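
The same guard matters even more on release: the old code ran "kbdev->as_free |= (1u << kctx->as_nr)" whenever the refcount reached zero, which is undefined behaviour in C if as_nr still holds the -1 sentinel. A sketch of the guarded release, mirroring claim_as() above:

#include <stdint.h>

/* Return a slot to the free mask only if the context really holds one;
 * shifting by a negative or out-of-range as_nr would be undefined
 * behaviour.
 */
static void release_as(uint32_t *as_free, int *as_nr)
{
	if (*as_nr >= 0 && *as_nr < 32) {
		*as_free |= (1u << *as_nr);
		*as_nr = -1; /* back to the "no AS bound" sentinel */
	}
}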
@@ -187,13 +210,14 @@ void kbase_ctx_sched_release_ctx(struct kbase_context *kctx)
void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx)
{
	struct kbase_device *const kbdev = kctx->kbdev;
+	unsigned long flags;

-	lockdep_assert_held(&kbdev->mmu_hw_mutex);
-	lockdep_assert_held(&kbdev->hwaccess_lock);
+	mutex_lock(&kbdev->mmu_hw_mutex);
+	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	WARN_ON(atomic_read(&kctx->refcount) != 0);

-	if (kctx->as_nr != KBASEP_AS_NR_INVALID) {
+	if ((kctx->as_nr >= 0) && (kctx->as_nr < BASE_MAX_NR_AS)) {
		if (kbdev->pm.backend.gpu_powered)
			kbase_mmu_disable(kctx);
@@ -201,6 +225,9 @@ void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx)
		kbdev->as_to_kctx[kctx->as_nr] = NULL;
		kctx->as_nr = KBASEP_AS_NR_INVALID;
	}
+
+	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+	mutex_unlock(&kbdev->mmu_hw_mutex);
}

void kbase_ctx_sched_restore_all_as(struct kbase_device *kbdev)
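
kbase_ctx_sched_remove_ctx() now takes its own locks instead of asserting that callers already hold them: the sleeping mmu_hw_mutex is the outer lock, the IRQ-saving hwaccess_lock spinlock the inner one, released in reverse order. A userspace model of that ordering, with both locks approximated by pthread mutexes and the teardown work abstracted as a callback:

#include <pthread.h>

static pthread_mutex_t mmu_hw_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t hwaccess_lock = PTHREAD_MUTEX_INITIALIZER;

/* The callee owns the locking: outer lock first, inner lock second,
 * released in strict reverse order so the hierarchy stays deadlock-free.
 */
static void remove_ctx(void (*teardown)(void *), void *ctx)
{
	pthread_mutex_lock(&mmu_hw_mutex);  /* outer: a mutex in the driver */
	pthread_mutex_lock(&hwaccess_lock); /* inner: spin_lock_irqsave there */

	teardown(ctx); /* disable the MMU, clear the AS table entry, ... */

	pthread_mutex_unlock(&hwaccess_lock);
	pthread_mutex_unlock(&mmu_hw_mutex);
}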
@@ -212,6 +239,8 @@ void kbase_ctx_sched_restore_all_as(struct kbase_device *kbdev)
	WARN_ON(!kbdev->pm.backend.gpu_powered);

+	kbdev->mmu_unresponsive = false;
+
	for (i = 0; i != kbdev->nr_hw_address_spaces; ++i) {
		struct kbase_context *kctx;
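
Clearing mmu_unresponsive (a kbase_device field this hunk assumes is defined elsewhere in the same change) at the top of the restore path means a hang recorded before a reset cannot suppress MMU programming after it. A simplified model of the restore loop:

#include <stdbool.h>
#include <stddef.h>

#define NR_AS 8

struct dev {
	bool mmu_unresponsive;  /* sticky "MMU timed out" flag */
	void *as_to_ctx[NR_AS]; /* context bound to each address space */
};

/* Runs once the GPU is freshly powered: drop the sticky hang flag first,
 * then reprogram every address space that still has a context bound.
 */
static void restore_all_as(struct dev *d, void (*program_mmu)(void *))
{
	int i;

	d->mmu_unresponsive = false;

	for (i = 0; i < NR_AS; i++)
		if (d->as_to_ctx[i])
			program_mmu(d->as_to_ctx[i]);
}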
@@ -264,7 +293,7 @@ struct kbase_context *kbase_ctx_sched_as_to_ctx_refcount(
	found_kctx = kbdev->as_to_kctx[as_nr];

-	if (!WARN_ON(found_kctx == NULL))
+	if (found_kctx)
		kbase_ctx_sched_retain_ctx_refcount(found_kctx);

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
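
Demoting the WARN_ON to a plain NULL check records that an empty as_to_kctx slot is a legitimate race (for example, a fault arriving just as the context is unbound), not a driver bug. A sketch of the lookup-and-retain pattern; retain() is a stand-in for kbase_ctx_sched_retain_ctx_refcount():

#include <stdatomic.h>
#include <stddef.h>

struct ctx {
	atomic_int refcount;
};

static void retain(struct ctx *c)
{
	atomic_fetch_add(&c->refcount, 1);
}

/* NULL simply means "nothing bound to this AS right now" and is returned
 * quietly; only a live entry gets its refcount bumped.
 */
static struct ctx *as_to_ctx_refcount(struct ctx **table, int as_nr, int max)
{
	struct ctx *found = NULL;

	if (as_nr >= 0 && as_nr < max)
		found = table[as_nr];
	if (found)
		retain(found);
	return found;
}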
@@ -313,16 +342,14 @@ struct kbase_context *kbase_ctx_sched_as_to_ctx_nolock(
bool kbase_ctx_sched_inc_refcount_nolock(struct kbase_context *kctx)
{
	bool result = false;
-	int as_nr;

	if (WARN_ON(kctx == NULL))
		return result;

	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);

-	as_nr = kctx->as_nr;
	if (atomic_read(&kctx->refcount) > 0) {
-		KBASE_DEBUG_ASSERT(as_nr >= 0);
+		KBASE_DEBUG_ASSERT(kctx->as_nr >= 0);

		kbase_ctx_sched_retain_ctx_refcount(kctx);
		KBASE_KTRACE_ADD(kctx->kbdev, SCHED_RETAIN_CTX_NOLOCK, kctx,