author     Tu Vuong <tu.vuong@arm.com>         2022-06-21 17:19:06 +0100
committer  Guus Sliepen <gsliepen@google.com>  2023-09-19 17:02:09 +0000
commit     37ec66030fa243c4a9bf7e3e6abd4269f8b0fd07 (patch)
tree       f208f1a20683556201e275c652bd9f8065b52853
parent     17c8fdb818b9520ec6e48f9f5163ab8fe014da73 (diff)
download   gpu-android-gs-shusky-udc-d1.tar.gz
GPUCORE-34589 jit_lock all JIT operations (android-14.0.0_r0.18, android-gs-shusky-udc-d1)
This commit protects all JIT operations with the KCPU queues context
jit_lock.

TI2: 893173
Type: Fix
Change-Id: If500064c18827e0071651274ccba541a68168ac2
Merged-In: I84fb19e7ce5f28e735d44a4993d51bd985aac80b
(cherry picked from commit 1b3511e3f68a01459d7c8c09711a8aabd374853c)
(cherry picked from commit bbabc5947dcd1fa21d3a0d8cb996238153bd57d1)
Bug: 298157271
Provenance: https://code.ipdelivery.arm.com/c/GPU/mali-ddk/+/5448
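For orientation, the locking model this patch moves to can be summarised in a short, simplified sketch. This is not the driver code: struct jit_ctx, jit_ctx_init, jit_enqueue_locked and jit_process are hypothetical names; only the mutex, list and lockdep primitives are real kernel APIs. Helpers that manipulate the JIT lists merely assert that jit_lock is held, and the caller takes the mutex once around the whole operation.

#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

/* Hypothetical stand-in for the KCPU queue context. */
struct jit_ctx {
        struct mutex jit_lock;           /* serialises all JIT operations */
        struct list_head jit_cmds_head;  /* pending alloc/free commands   */
};

struct jit_cmd {
        struct list_head node;
};

static void jit_ctx_init(struct jit_ctx *ctx)
{
        mutex_init(&ctx->jit_lock);
        INIT_LIST_HEAD(&ctx->jit_cmds_head);
}

/* Helpers take no lock of their own; they only assert the caller holds it. */
static void jit_enqueue_locked(struct jit_ctx *ctx, struct jit_cmd *cmd)
{
        lockdep_assert_held(&ctx->jit_lock);
        list_add_tail(&cmd->node, &ctx->jit_cmds_head);
}

/* The caller serialises the whole operation under one critical section. */
static void jit_process(struct jit_ctx *ctx, struct jit_cmd *cmd)
{
        mutex_lock(&ctx->jit_lock);
        jit_enqueue_locked(ctx, cmd);
        /* ... the rest of the JIT processing happens under the same lock ... */
        mutex_unlock(&ctx->jit_lock);
}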
-rw-r--r--  mali_kbase/csf/mali_kbase_csf_defs.h  |  4
-rw-r--r--  mali_kbase/csf/mali_kbase_csf_kcpu.c  | 68
-rw-r--r--  mali_kbase/mali_kbase_mem.c           | 16
3 files changed, 50 insertions, 38 deletions
diff --git a/mali_kbase/csf/mali_kbase_csf_defs.h b/mali_kbase/csf/mali_kbase_csf_defs.h
index 1cbb19a..700af75 100644
--- a/mali_kbase/csf/mali_kbase_csf_defs.h
+++ b/mali_kbase/csf/mali_kbase_csf_defs.h
@@ -551,7 +551,7 @@ struct kbase_queue_group {
* @num_cmds: The number of commands that have been enqueued across
* all the KCPU command queues. This could be used as a
* timestamp to determine the command's enqueueing time.
- * @jit_lock: Lock protecting jit_cmds_head and jit_blocked_queues.
+ * @jit_lock: Lock to serialise JIT operations.
* @jit_cmds_head: A list of the just-in-time memory commands, both
* allocate & free, in submission order, protected
* by kbase_csf_kcpu_queue_context.lock.
@@ -565,7 +565,7 @@ struct kbase_csf_kcpu_queue_context {
struct kbase_kcpu_command_queue *array[KBASEP_MAX_KCPU_QUEUES];
DECLARE_BITMAP(in_use, KBASEP_MAX_KCPU_QUEUES);
atomic64_t num_cmds;
- spinlock_t jit_lock;
+ struct mutex jit_lock;
struct list_head jit_cmds_head;
struct list_head jit_blocked_queues;
};
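The reason the lock type changes from spinlock_t to struct mutex is that the wider critical section introduced below covers work that can sleep (memory allocation and CPU mapping of the JIT region), which is not permitted while holding a spinlock. A hedged illustration of that point, reusing the hypothetical jit_ctx from the sketch above (jit_alloc_under_lock is also a made-up name):

#include <linux/errno.h>
#include <linux/slab.h>

static int jit_alloc_under_lock(struct jit_ctx *ctx)
{
        void *buf;

        mutex_lock(&ctx->jit_lock);

        /* A GFP_KERNEL allocation may sleep: fine under a mutex,
         * forbidden under a spinlock.
         */
        buf = kzalloc(4096, GFP_KERNEL);
        if (!buf) {
                mutex_unlock(&ctx->jit_lock);
                return -ENOMEM;
        }

        /* ... buf stands in for the real JIT allocation work ... */

        kfree(buf);
        mutex_unlock(&ctx->jit_lock);
        return 0;
}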
diff --git a/mali_kbase/csf/mali_kbase_csf_kcpu.c b/mali_kbase/csf/mali_kbase_csf_kcpu.c
index 81e3d96..89823ba 100644
--- a/mali_kbase/csf/mali_kbase_csf_kcpu.c
+++ b/mali_kbase/csf/mali_kbase_csf_kcpu.c
@@ -190,8 +190,7 @@ static void kbase_jit_add_to_pending_alloc_list(
struct kbase_kcpu_command_queue *blocked_queue;
lockdep_assert_held(&queue->lock);
-
- spin_lock(&kctx->csf.kcpu_queues.jit_lock);
+ lockdep_assert_held(&kctx->csf.kcpu_queues.jit_lock);
list_for_each_entry(blocked_queue,
&kctx->csf.kcpu_queues.jit_blocked_queues,
@@ -207,8 +206,6 @@ static void kbase_jit_add_to_pending_alloc_list(
}
list_add_tail(&queue->jit_blocked, target_list_head);
-
- spin_unlock(&kctx->csf.kcpu_queues.jit_lock);
}
/**
@@ -240,25 +237,26 @@ static int kbase_kcpu_jit_allocate_process(
lockdep_assert_held(&queue->lock);
- if (alloc_info->blocked) {
- spin_lock(&kctx->csf.kcpu_queues.jit_lock);
- list_del(&queue->jit_blocked);
- spin_unlock(&kctx->csf.kcpu_queues.jit_lock);
- alloc_info->blocked = false;
- }
-
if (WARN_ON(!info))
return -EINVAL;
+ mutex_lock(&kctx->csf.kcpu_queues.jit_lock);
+
/* Check if all JIT IDs are not in use */
for (i = 0; i < count; i++, info++) {
/* The JIT ID is still in use so fail the allocation */
if (kctx->jit_alloc[info->id]) {
dev_dbg(kctx->kbdev->dev, "JIT ID still in use");
- return -EINVAL;
+ ret = -EINVAL;
+ goto fail;
}
}
+ if (alloc_info->blocked) {
+ list_del(&queue->jit_blocked);
+ alloc_info->blocked = false;
+ }
+
/* Now start the allocation loop */
for (i = 0, info = alloc_info->info; i < count; i++, info++) {
/* Create a JIT allocation */
@@ -267,7 +265,6 @@ static int kbase_kcpu_jit_allocate_process(
bool can_block = false;
struct kbase_kcpu_command const *jit_cmd;
- spin_lock(&kctx->csf.kcpu_queues.jit_lock);
list_for_each_entry(jit_cmd, &kctx->csf.kcpu_queues.jit_cmds_head, info.jit_alloc.node) {
if (jit_cmd == cmd)
break;
@@ -286,7 +283,6 @@ static int kbase_kcpu_jit_allocate_process(
}
}
}
- spin_unlock(&kctx->csf.kcpu_queues.jit_lock);
if (!can_block) {
/*
@@ -295,7 +291,7 @@ static int kbase_kcpu_jit_allocate_process(
*/
dev_warn_ratelimited(kctx->kbdev->dev, "JIT alloc command failed: %pK\n", cmd);
ret = -ENOMEM;
- goto fail;
+ goto fail_rollback;
}
/* There are pending frees for an active allocation
@@ -313,7 +309,8 @@ static int kbase_kcpu_jit_allocate_process(
kctx->jit_alloc[info->id] = NULL;
}
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto fail;
}
/* Bind it to the user provided ID. */
@@ -329,7 +326,7 @@ static int kbase_kcpu_jit_allocate_process(
KBASE_REG_CPU_WR, &mapping);
if (!ptr) {
ret = -ENOMEM;
- goto fail;
+ goto fail_rollback;
}
reg = kctx->jit_alloc[info->id];
@@ -338,9 +335,11 @@ static int kbase_kcpu_jit_allocate_process(
kbase_vunmap(kctx, &mapping);
}
+ mutex_unlock(&kctx->csf.kcpu_queues.jit_lock);
+
return 0;
-fail:
+fail_rollback:
/* Roll back completely */
for (i = 0, info = alloc_info->info; i < count; i++, info++) {
/* Free the allocations that were successful.
@@ -353,6 +352,8 @@ fail:
kctx->jit_alloc[info->id] = KBASE_RESERVED_REG_JIT_ALLOC;
}
+fail:
+ mutex_unlock(&kctx->csf.kcpu_queues.jit_lock);
return ret;
}
@@ -415,13 +416,13 @@ static int kbase_kcpu_jit_allocate_prepare(
}
current_command->type = BASE_KCPU_COMMAND_TYPE_JIT_ALLOC;
- spin_lock(&kctx->csf.kcpu_queues.jit_lock);
- list_add_tail(&current_command->info.jit_alloc.node,
- &kctx->csf.kcpu_queues.jit_cmds_head);
- spin_unlock(&kctx->csf.kcpu_queues.jit_lock);
current_command->info.jit_alloc.info = info;
current_command->info.jit_alloc.count = count;
current_command->info.jit_alloc.blocked = false;
+ mutex_lock(&kctx->csf.kcpu_queues.jit_lock);
+ list_add_tail(&current_command->info.jit_alloc.node,
+ &kctx->csf.kcpu_queues.jit_cmds_head);
+ mutex_unlock(&kctx->csf.kcpu_queues.jit_lock);
return 0;
out_free:
@@ -442,7 +443,7 @@ static void kbase_kcpu_jit_allocate_finish(
{
lockdep_assert_held(&queue->lock);
- spin_lock(&queue->kctx->csf.kcpu_queues.jit_lock);
+ mutex_lock(&queue->kctx->csf.kcpu_queues.jit_lock);
/* Remove this command from the jit_cmds_head list */
list_del(&cmd->info.jit_alloc.node);
@@ -456,7 +457,7 @@ static void kbase_kcpu_jit_allocate_finish(
cmd->info.jit_alloc.blocked = false;
}
- spin_unlock(&queue->kctx->csf.kcpu_queues.jit_lock);
+ mutex_unlock(&queue->kctx->csf.kcpu_queues.jit_lock);
kfree(cmd->info.jit_alloc.info);
}
@@ -470,17 +471,17 @@ static void kbase_kcpu_jit_retry_pending_allocs(struct kbase_context *kctx)
{
struct kbase_kcpu_command_queue *blocked_queue;
+ lockdep_assert_held(&kctx->csf.kcpu_queues.jit_lock);
+
/*
* Reschedule all queues blocked by JIT_ALLOC commands.
* NOTE: This code traverses the list of blocked queues directly. It
* only works as long as the queued works are not executed at the same
* time. This precondition is true since we're holding the
- * kbase_csf_kcpu_queue_context.lock .
+ * kbase_csf_kcpu_queue_context.jit_lock .
*/
- spin_lock(&kctx->csf.kcpu_queues.jit_lock);
list_for_each_entry(blocked_queue, &kctx->csf.kcpu_queues.jit_blocked_queues, jit_blocked)
kthread_queue_work(&blocked_queue->csf_kcpu_worker, &blocked_queue->work);
- spin_unlock(&kctx->csf.kcpu_queues.jit_lock);
}
static int kbase_kcpu_jit_free_process(struct kbase_kcpu_command_queue *queue,
@@ -498,6 +499,7 @@ static int kbase_kcpu_jit_free_process(struct kbase_kcpu_command_queue *queue,
return -EINVAL;
lockdep_assert_held(&queue->lock);
+ mutex_lock(&kctx->csf.kcpu_queues.jit_lock);
KBASE_TLSTREAM_TL_KBASE_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END(queue->kctx->kbdev,
queue);
@@ -533,11 +535,11 @@ static int kbase_kcpu_jit_free_process(struct kbase_kcpu_command_queue *queue,
* Remove this command from the jit_cmds_head list and retry pending
* allocations.
*/
- spin_lock(&kctx->csf.kcpu_queues.jit_lock);
list_del(&cmd->info.jit_free.node);
- spin_unlock(&kctx->csf.kcpu_queues.jit_lock);
kbase_kcpu_jit_retry_pending_allocs(kctx);
+ mutex_unlock(&kctx->csf.kcpu_queues.jit_lock);
+
/* Free the list of ids */
kfree(ids);
@@ -602,12 +604,12 @@ static int kbase_kcpu_jit_free_prepare(
}
current_command->type = BASE_KCPU_COMMAND_TYPE_JIT_FREE;
- spin_lock(&kctx->csf.kcpu_queues.jit_lock);
- list_add_tail(&current_command->info.jit_free.node,
- &kctx->csf.kcpu_queues.jit_cmds_head);
- spin_unlock(&kctx->csf.kcpu_queues.jit_lock);
current_command->info.jit_free.ids = ids;
current_command->info.jit_free.count = count;
+ mutex_lock(&kctx->csf.kcpu_queues.jit_lock);
+ list_add_tail(&current_command->info.jit_free.node,
+ &kctx->csf.kcpu_queues.jit_cmds_head);
+ mutex_unlock(&kctx->csf.kcpu_queues.jit_lock);
return 0;
out_free:
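The reworked error handling in kbase_kcpu_jit_allocate_process above splits the old single fail label into fail_rollback (undo partially completed allocations, then unlock) and fail (just unlock), so the mutex is released on every exit path. A minimal sketch of that control flow, continuing the hypothetical jit_ctx and using made-up stubs in place of the real ID check, allocation loop and rollback:

#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical stubs; only the control flow is the point here. */
static bool jit_ids_in_use(struct jit_ctx *ctx) { return false; }
static int jit_do_allocations(struct jit_ctx *ctx) { return 0; }
static void jit_rollback_allocations(struct jit_ctx *ctx) { }

static int jit_allocate_process_sketch(struct jit_ctx *ctx)
{
        int ret;

        mutex_lock(&ctx->jit_lock);

        if (jit_ids_in_use(ctx)) {
                ret = -EINVAL;
                goto fail;                      /* nothing allocated yet, just unlock */
        }

        ret = jit_do_allocations(ctx);          /* may have partially succeeded */
        if (ret)
                goto fail_rollback;

        mutex_unlock(&ctx->jit_lock);
        return 0;

fail_rollback:
        jit_rollback_allocations(ctx);          /* roll back completely */
fail:
        mutex_unlock(&ctx->jit_lock);
        return ret;
}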
diff --git a/mali_kbase/mali_kbase_mem.c b/mali_kbase/mali_kbase_mem.c
index 0d00b14..fb4d763 100644
--- a/mali_kbase/mali_kbase_mem.c
+++ b/mali_kbase/mali_kbase_mem.c
@@ -3814,7 +3814,7 @@ int kbase_jit_init(struct kbase_context *kctx)
INIT_WORK(&kctx->jit_work, kbase_jit_destroy_worker);
#if MALI_USE_CSF
- spin_lock_init(&kctx->csf.kcpu_queues.jit_lock);
+ mutex_init(&kctx->csf.kcpu_queues.jit_lock);
INIT_LIST_HEAD(&kctx->csf.kcpu_queues.jit_cmds_head);
INIT_LIST_HEAD(&kctx->csf.kcpu_queues.jit_blocked_queues);
#else /* !MALI_USE_CSF */
@@ -4254,7 +4254,9 @@ static bool jit_allow_allocate(struct kbase_context *kctx,
{
#if !MALI_USE_CSF
lockdep_assert_held(&kctx->jctx.lock);
-#endif
+#else /* MALI_USE_CSF */
+ lockdep_assert_held(&kctx->csf.kcpu_queues.jit_lock);
+#endif /* !MALI_USE_CSF */
#if MALI_JIT_PRESSURE_LIMIT_BASE
if (!ignore_pressure_limit &&
@@ -4347,7 +4349,9 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
#if !MALI_USE_CSF
lockdep_assert_held(&kctx->jctx.lock);
-#endif
+#else /* MALI_USE_CSF */
+ lockdep_assert_held(&kctx->csf.kcpu_queues.jit_lock);
+#endif /* !MALI_USE_CSF */
if (!jit_allow_allocate(kctx, info, ignore_pressure_limit))
return NULL;
@@ -4579,6 +4583,12 @@ void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg)
{
u64 old_pages;
+#if !MALI_USE_CSF
+ lockdep_assert_held(&kctx->jctx.lock);
+#else /* MALI_USE_CSF */
+ lockdep_assert_held(&kctx->csf.kcpu_queues.jit_lock);
+#endif /* !MALI_USE_CSF */
+
/* JIT id not immediately available here, so use 0u */
trace_mali_jit_free(reg, 0u);
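
Finally, the mali_kbase_mem.c hunks add build-dependent lockdep assertions: on CSF GPUs the JIT state is now serialised by kcpu_queues.jit_lock, while job-manager builds keep relying on the job context lock. A sketch of that pattern, assuming the driver's struct kbase_context definition is available (the function name jit_touch_state is hypothetical):

static void jit_touch_state(struct kbase_context *kctx)
{
#if !MALI_USE_CSF
        /* Job-manager GPUs: JIT state is protected by the job context lock. */
        lockdep_assert_held(&kctx->jctx.lock);
#else /* MALI_USE_CSF */
        /* CSF GPUs: JIT state is protected by the KCPU queues jit_lock. */
        lockdep_assert_held(&kctx->csf.kcpu_queues.jit_lock);
#endif /* !MALI_USE_CSF */

        /* ... code that reads or modifies kctx JIT state ... */
}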