author	Suzanne Candanedo <suzanne.candanedo@arm.com>	2022-12-16 12:07:45 +0000
committer	Guus Sliepen <gsliepen@google.com>	2023-01-13 16:31:47 +0000
commit	c48886fd473a20e28bb0199fc40f78ed1ec88b1d (patch)
tree	0508d24a04d09627402ac38a8a57ae64bbf1fa06
parent	422aa1fad7e63f16000ffb9303e816b54ef3d8ca (diff)
download	gpu-android-gs-pantah-5.10-android13-qpr2-beta.tar.gz
Move the code to disable the MMU for a kctx (which will also flush the GPU
caches for that kctx's Address Space) from the end of kctx termination to
JM/CSF kctx-scheduler termination time.

This ensures the GPU cached writes for that kctx are flushed before we free
the kctx's GPU memory pages in the region tracker, and before the MMU tables
are freed. This in turn ensures the GPU cached writes do not land in freed
memory (which would be a security issue).

Change-Id: I2730a5762cb106ca2179cf9a2f789b8764f21901
(cherry picked from commit 09e14ccf27007dcb2cc0d4d64f5087ccc620a0b5)
Provenance: https://code.ipdelivery.arm.com/c/GPU/mali-ddk/+/4810
Bug: 260123838
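In effect, the patch moves the MMU-disable/cache-flush step earlier in the
kctx teardown sequence. The sketch below illustrates the resulting order; the
wrapper function example_kctx_teardown_order() is hypothetical (the real
driver drives these steps from its context-destruction path), but the callees
named in the comments are the functions touched by this patch.

/* Illustrative sketch only -- not part of the patch. CSF shown; on JM the
 * equivalent of step 1 is kbasep_js_kctx_term().
 */
static void example_kctx_teardown_order(struct kbase_context *kctx)
{
	/* 1. Scheduler termination now calls kbase_ctx_sched_remove_ctx(),
	 *    which takes mmu_hw_mutex/hwaccess_lock itself, unprograms the
	 *    kctx's address space and flushes its GPU caches.
	 */
	kbase_csf_scheduler_context_term(kctx);

	/* 2. Only then are the kctx's GPU memory regions and pages freed;
	 *    the WARN added here fires if the kctx is still programmed in
	 *    an address space (as_nr != KBASEP_AS_NR_INVALID).
	 */
	kbase_region_tracker_term(kctx);

	/* 3. Finally the MMU page tables are torn down, so GPU cached
	 *    writes cannot land in memory that has already been freed.
	 */
	kbase_mmu_term(kctx->kbdev, &kctx->mmu);
}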
-rw-r--r--	mali_kbase/context/mali_kbase_context.c		| 11
-rw-r--r--	mali_kbase/csf/mali_kbase_csf_scheduler.c	| 15
-rw-r--r--	mali_kbase/mali_kbase_ctx_sched.c		| 14
-rw-r--r--	mali_kbase/mali_kbase_ctx_sched.h		| 12
-rw-r--r--	mali_kbase/mali_kbase_js.c			|  5
-rw-r--r--	mali_kbase/mali_kbase_mem.c			|  4
-rw-r--r--	mali_kbase/mmu/mali_kbase_mmu.c			|  4
7 files changed, 46 insertions(+), 19 deletions(-)
diff --git a/mali_kbase/context/mali_kbase_context.c b/mali_kbase/context/mali_kbase_context.c
index 95bd641..5fc1636 100644
--- a/mali_kbase/context/mali_kbase_context.c
+++ b/mali_kbase/context/mali_kbase_context.c
@@ -176,10 +176,6 @@ int kbase_context_common_init(struct kbase_context *kctx)
/* creating a context is considered a disjoint event */
kbase_disjoint_event(kctx->kbdev);
- kctx->as_nr = KBASEP_AS_NR_INVALID;
-
- atomic_set(&kctx->refcount, 0);
-
spin_lock_init(&kctx->mm_update_lock);
kctx->process_mm = NULL;
atomic_set(&kctx->nonmapped_pages, 0);
@@ -298,15 +294,8 @@ static void kbase_remove_kctx_from_process(struct kbase_context *kctx)
void kbase_context_common_term(struct kbase_context *kctx)
{
- unsigned long flags;
int pages;
- mutex_lock(&kctx->kbdev->mmu_hw_mutex);
- spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
- kbase_ctx_sched_remove_ctx(kctx);
- spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
- mutex_unlock(&kctx->kbdev->mmu_hw_mutex);
-
pages = atomic_read(&kctx->used_pages);
if (pages != 0)
dev_warn(kctx->kbdev->dev,
diff --git a/mali_kbase/csf/mali_kbase_csf_scheduler.c b/mali_kbase/csf/mali_kbase_csf_scheduler.c
index d33fa03..24616e1 100644
--- a/mali_kbase/csf/mali_kbase_csf_scheduler.c
+++ b/mali_kbase/csf/mali_kbase_csf_scheduler.c
@@ -6600,6 +6600,8 @@ int kbase_csf_scheduler_context_init(struct kbase_context *kctx)
int priority;
int err;
+ kbase_ctx_sched_init_ctx(kctx);
+
for (priority = 0; priority < KBASE_QUEUE_GROUP_PRIORITY_COUNT;
++priority) {
INIT_LIST_HEAD(&kctx->csf.sched.runnable_groups[priority]);
@@ -6616,7 +6618,8 @@ int kbase_csf_scheduler_context_init(struct kbase_context *kctx)
if (err) {
dev_err(kctx->kbdev->dev,
"Failed to initialize scheduler context kworker");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto alloc_wq_failed;
}
kthread_init_work(&kctx->csf.sched.sync_update_work,
@@ -6627,7 +6630,7 @@ int kbase_csf_scheduler_context_init(struct kbase_context *kctx)
if (err) {
dev_err(kctx->kbdev->dev,
"Failed to register a sync update callback");
- kbase_destroy_kworker_stack(&kctx->csf.sched.sync_update_worker);
+ goto event_wait_add_failed;
}
/* Per-kctx heap_info object initialization */
@@ -6635,6 +6638,12 @@ int kbase_csf_scheduler_context_init(struct kbase_context *kctx)
INIT_LIST_HEAD(&kctx->csf.sched.heap_info.mgr_link);
return err;
+
+event_wait_add_failed:
+ kbase_destroy_kworker_stack(&kctx->csf.sched.sync_update_worker);
+alloc_wq_failed:
+ kbase_ctx_sched_remove_ctx(kctx);
+ return err;
}
void kbase_csf_scheduler_context_term(struct kbase_context *kctx)
@@ -6642,6 +6651,8 @@ void kbase_csf_scheduler_context_term(struct kbase_context *kctx)
kbase_csf_event_wait_remove(kctx, check_group_sync_update_cb, kctx);
kthread_cancel_work_sync(&kctx->csf.sched.sync_update_work);
kbase_destroy_kworker_stack(&kctx->csf.sched.sync_update_worker);
+
+ kbase_ctx_sched_remove_ctx(kctx);
}
int kbase_csf_scheduler_init(struct kbase_device *kbdev)
diff --git a/mali_kbase/mali_kbase_ctx_sched.c b/mali_kbase/mali_kbase_ctx_sched.c
index 66149f9..fb05467 100644
--- a/mali_kbase/mali_kbase_ctx_sched.c
+++ b/mali_kbase/mali_kbase_ctx_sched.c
@@ -69,6 +69,12 @@ void kbase_ctx_sched_term(struct kbase_device *kbdev)
}
}
+void kbase_ctx_sched_init_ctx(struct kbase_context *kctx)
+{
+ kctx->as_nr = KBASEP_AS_NR_INVALID;
+ atomic_set(&kctx->refcount, 0);
+}
+
/* kbasep_ctx_sched_find_as_for_ctx - Find a free address space
*
* @kbdev: The context for which to find a free address space
@@ -201,9 +207,10 @@ void kbase_ctx_sched_release_ctx(struct kbase_context *kctx)
void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx)
{
struct kbase_device *const kbdev = kctx->kbdev;
+ unsigned long flags;
- lockdep_assert_held(&kbdev->mmu_hw_mutex);
- lockdep_assert_held(&kbdev->hwaccess_lock);
+ mutex_lock(&kbdev->mmu_hw_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
WARN_ON(atomic_read(&kctx->refcount) != 0);
@@ -215,6 +222,9 @@ void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx)
kbdev->as_to_kctx[kctx->as_nr] = NULL;
kctx->as_nr = KBASEP_AS_NR_INVALID;
}
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kbdev->mmu_hw_mutex);
}
void kbase_ctx_sched_restore_all_as(struct kbase_device *kbdev)
diff --git a/mali_kbase/mali_kbase_ctx_sched.h b/mali_kbase/mali_kbase_ctx_sched.h
index f787cc3..5a8d175 100644
--- a/mali_kbase/mali_kbase_ctx_sched.h
+++ b/mali_kbase/mali_kbase_ctx_sched.h
@@ -60,6 +60,15 @@ int kbase_ctx_sched_init(struct kbase_device *kbdev);
void kbase_ctx_sched_term(struct kbase_device *kbdev);
/**
+ * kbase_ctx_sched_init_ctx - Initialize per-context data fields for scheduling
+ * @kctx: The context to initialize
+ *
+ * This must be called during context initialization before any other context
+ * scheduling functions are called on @kctx
+ */
+void kbase_ctx_sched_init_ctx(struct kbase_context *kctx);
+
+/**
* kbase_ctx_sched_retain_ctx - Retain a reference to the @ref kbase_context
* @kctx: The context to which to retain a reference
*
@@ -113,9 +122,6 @@ void kbase_ctx_sched_release_ctx(struct kbase_context *kctx);
* This function should be called when a context is being destroyed. The
* context must no longer have any reference. If it has been assigned an
* address space before then the AS will be unprogrammed.
- *
- * The kbase_device::mmu_hw_mutex and kbase_device::hwaccess_lock locks must be
- * held whilst calling this function.
*/
void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx);
diff --git a/mali_kbase/mali_kbase_js.c b/mali_kbase/mali_kbase_js.c
index df729f5..1eabd1d 100644
--- a/mali_kbase/mali_kbase_js.c
+++ b/mali_kbase/mali_kbase_js.c
@@ -642,6 +642,8 @@ int kbasep_js_kctx_init(struct kbase_context *const kctx)
KBASE_DEBUG_ASSERT(kbdev != NULL);
+ kbase_ctx_sched_init_ctx(kctx);
+
for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i)
INIT_LIST_HEAD(&kctx->jctx.sched_info.ctx.ctx_list_entry[i]);
@@ -715,6 +717,8 @@ void kbasep_js_kctx_term(struct kbase_context *kctx)
kbase_backend_ctx_count_changed(kbdev);
mutex_unlock(&kbdev->js_data.runpool_mutex);
}
+
+ kbase_ctx_sched_remove_ctx(kctx);
}
/*
@@ -4106,4 +4110,3 @@ base_jd_prio kbase_js_priority_check(struct kbase_device *kbdev, base_jd_prio pr
req_priority);
return kbasep_js_sched_prio_to_atom_prio(kbdev, out_priority);
}
-
diff --git a/mali_kbase/mali_kbase_mem.c b/mali_kbase/mali_kbase_mem.c
index 1526225..ce6e94c 100644
--- a/mali_kbase/mali_kbase_mem.c
+++ b/mali_kbase/mali_kbase_mem.c
@@ -827,6 +827,10 @@ static void kbase_region_tracker_erase_rbtree(struct rb_root *rbtree)
void kbase_region_tracker_term(struct kbase_context *kctx)
{
+ WARN(kctx->as_nr != KBASEP_AS_NR_INVALID,
+ "kctx-%d_%d must first be scheduled out to flush GPU caches+tlbs before erasing remaining regions",
+ kctx->tgid, kctx->id);
+
kbase_gpu_vm_lock(kctx);
kbase_region_tracker_erase_rbtree(&kctx->reg_rbtree_same);
kbase_region_tracker_erase_rbtree(&kctx->reg_rbtree_custom);
diff --git a/mali_kbase/mmu/mali_kbase_mmu.c b/mali_kbase/mmu/mali_kbase_mmu.c
index c98d830..4828cdc 100644
--- a/mali_kbase/mmu/mali_kbase_mmu.c
+++ b/mali_kbase/mmu/mali_kbase_mmu.c
@@ -2752,6 +2752,10 @@ void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut)
{
int level;
+ WARN((mmut->kctx) && (mmut->kctx->as_nr != KBASEP_AS_NR_INVALID),
+ "kctx-%d_%d must first be scheduled out to flush GPU caches+tlbs before tearing down MMU tables",
+ mmut->kctx->tgid, mmut->kctx->id);
+
if (mmut->pgd != KBASE_MMU_INVALID_PGD_ADDRESS) {
rt_mutex_lock(&mmut->mmu_lock);
mmu_teardown_level(kbdev, mmut, mmut->pgd, MIDGARD_MMU_TOPLEVEL);