author     Devika Krishnadas <kdevika@google.com>    2022-05-06 20:45:05 +0000
committer  Devika Krishnadas <kdevika@google.com>    2022-05-06 22:36:35 +0000
commit     5eb4e28d03ffb5e035bf7d39f24d2d0e33daa07f
tree       c082ab371d30431f21b6c544aef1b76c2236e562
parent     53773b3ec576097435a5a2e3ae81ab601549caff
mali_kbase: use RT_MUTEX for some job submission and context locks
Bug: 230692528
Test: smoke test
Signed-off-by: Devika Krishnadas <kdevika@google.com>
Change-Id: I7d5da6c0a55ccfc6e789ecb74f0af71aac9d5be3
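
The locks converted below nest in a fixed order; kbase_js_zap_context(), for example, takes jctx.lock, then queue_mutex, then jsctx_mutex, while kbasep_js_schedule_ctx() takes runpool_mutex and the hwaccess_lock spinlock innermost. A minimal sketch of that ordering follows (demo_ctx and demo_lock_order are hypothetical names, not driver code):

/* Sketch only: the lock nesting this patch preserves, modelled on
 * kbase_js_zap_context() and kbasep_js_schedule_ctx() below. demo_ctx and
 * demo_lock_order are hypothetical; only the relevant locks are shown.
 */
#include <linux/mutex.h>
#include <linux/rtmutex.h>
#include <linux/spinlock.h>

struct demo_ctx {
	struct rt_mutex jctx_lock;	/* was struct mutex: kbase_jd_context.lock */
	struct rt_mutex queue_mutex;	/* was struct mutex: js_data.queue_mutex */
	struct rt_mutex jsctx_mutex;	/* was struct mutex: ctx.jsctx_mutex */
	struct mutex runpool_mutex;	/* left as a plain mutex by this patch */
	spinlock_t hwaccess_lock;	/* innermost, IRQ-safe, unchanged */
};

static void demo_lock_order(struct demo_ctx *c)
{
	unsigned long flags;

	rt_mutex_lock(&c->jctx_lock);		/* outermost */
	rt_mutex_lock(&c->queue_mutex);
	rt_mutex_lock(&c->jsctx_mutex);
	mutex_lock(&c->runpool_mutex);
	spin_lock_irqsave(&c->hwaccess_lock, flags);

	/* ... critical section ... */

	spin_unlock_irqrestore(&c->hwaccess_lock, flags);
	mutex_unlock(&c->runpool_mutex);
	rt_mutex_unlock(&c->jsctx_mutex);
	rt_mutex_unlock(&c->queue_mutex);
	rt_mutex_unlock(&c->jctx_lock);		/* release in reverse order */
}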
 mali_kbase/backend/gpu/mali_kbase_jm_as.c          |  16
 mali_kbase/backend/gpu/mali_kbase_jm_hw.c          |   8
 mali_kbase/context/backend/mali_kbase_context_jm.c |   4
 mali_kbase/jm/mali_kbase_jm_defs.h                 |   2
 mali_kbase/jm/mali_kbase_js_defs.h                 |   4
 mali_kbase/mali_kbase_core_linux.c                 |   4
 mali_kbase/mali_kbase_event.c                      |   8
 mali_kbase/mali_kbase_jd.c                         |  48
 mali_kbase/mali_kbase_jd_debugfs.c                 |   4
 mali_kbase/mali_kbase_js.c                         | 130
 mali_kbase/mali_kbase_mem.c                        |   8
 mali_kbase/mali_kbase_softjobs.c                   |  24
 mali_kbase/mmu/mali_kbase_mmu.c                    |   8
 13 files changed, 134 insertions(+), 134 deletions(-)
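
The change itself is mechanical: each converted lock swaps struct mutex and the mutex_*() calls for struct rt_mutex and the rt_mutex_*() equivalents, which adds priority inheritance on these job-submission paths. A before/after sketch of the pattern (demo_jd_context is a hypothetical stand-in for the kbase_jd_context change in mali_kbase_jm_defs.h):

/* Sketch only: the substitution this diff applies everywhere below.
 * rt_mutex_lock() boosts the current holder to the priority of the
 * highest-priority waiter; rt_mutex_unlock() drops any inherited boost.
 */
#include <linux/rtmutex.h>

struct demo_jd_context {
	struct rt_mutex lock;	/* before this patch: struct mutex lock; */
};

static void demo_init(struct demo_jd_context *jctx)
{
	rt_mutex_init(&jctx->lock);	/* replaces mutex_init() */
}

static void demo_submit(struct demo_jd_context *jctx)
{
	rt_mutex_lock(&jctx->lock);	/* replaces mutex_lock() */
	/* ... job-submission critical section ... */
	rt_mutex_unlock(&jctx->lock);	/* replaces mutex_unlock() */
}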
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_as.c b/mali_kbase/backend/gpu/mali_kbase_jm_as.c
index 888aa59..309e5c7 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_as.c
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_as.c
@@ -128,7 +128,7 @@ int kbase_backend_find_and_release_free_address_space(
 	js_devdata = &kbdev->js_data;
 	js_kctx_info = &kctx->jctx.sched_info;
 
-	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
 	mutex_lock(&js_devdata->runpool_mutex);
 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 
@@ -154,7 +154,7 @@ int kbase_backend_find_and_release_free_address_space(
 
 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 		mutex_unlock(&js_devdata->runpool_mutex);
-		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
 
 		return KBASEP_AS_NR_INVALID;
 	}
@@ -167,11 +167,11 @@ int kbase_backend_find_and_release_free_address_space(
 			 */
 			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 			mutex_unlock(&js_devdata->runpool_mutex);
-			mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+			rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
 
 			/* Release context from address space */
-			mutex_lock(&as_js_kctx_info->ctx.jsctx_mutex);
+			rt_mutex_lock(&as_js_kctx_info->ctx.jsctx_mutex);
 			mutex_lock(&js_devdata->runpool_mutex);
 
 			kbasep_js_runpool_release_ctx_nolock(kbdev, as_kctx);
@@ -182,7 +182,7 @@ int kbase_backend_find_and_release_free_address_space(
 						true);
 
 				mutex_unlock(&js_devdata->runpool_mutex);
-				mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);
+				rt_mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);
 
 				return i;
 			}
@@ -191,9 +191,9 @@ int kbase_backend_find_and_release_free_address_space(
 			 * continue looking for free AS
 			 */
 			mutex_unlock(&js_devdata->runpool_mutex);
-			mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);
+			rt_mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);
 
-			mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+			rt_mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
 			mutex_lock(&js_devdata->runpool_mutex);
 			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 		}
@@ -202,7 +202,7 @@ int kbase_backend_find_and_release_free_address_space(
 
 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 	mutex_unlock(&js_devdata->runpool_mutex);
-	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
 
 	return KBASEP_AS_NR_INVALID;
 }
diff --git a/mali_kbase/backend/gpu/mali_kbase_jm_hw.c b/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
index 609625d..32bdf72 100644
--- a/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
+++ b/mali_kbase/backend/gpu/mali_kbase_jm_hw.c
@@ -873,10 +873,10 @@ exit:
 	dev_dbg(kbdev->dev, "Zap: Finished Context %pK", kctx);
 
 	/* Ensure that the signallers of the waitqs have finished */
-	mutex_lock(&kctx->jctx.lock);
-	mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
-	mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
-	mutex_unlock(&kctx->jctx.lock);
+	rt_mutex_lock(&kctx->jctx.lock);
+	rt_mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+	rt_mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+	rt_mutex_unlock(&kctx->jctx.lock);
 }
 
 u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev)
diff --git a/mali_kbase/context/backend/mali_kbase_context_jm.c b/mali_kbase/context/backend/mali_kbase_context_jm.c
index 8b8aefb..74402ec 100644
--- a/mali_kbase/context/backend/mali_kbase_context_jm.c
+++ b/mali_kbase/context/backend/mali_kbase_context_jm.c
@@ -91,7 +91,7 @@ static int kbase_context_submit_check(struct kbase_context *kctx)
 	base_context_create_flags const flags =
 		kctx->create_flags;
 
-	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
 	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
 
 	/* Translate the flags */
@@ -99,7 +99,7 @@ static int kbase_context_submit_check(struct kbase_context *kctx)
 		kbase_ctx_flag_clear(kctx, KCTX_SUBMIT_DISABLED);
 
 	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
-	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
 
 	return 0;
 }
diff --git a/mali_kbase/jm/mali_kbase_jm_defs.h b/mali_kbase/jm/mali_kbase_jm_defs.h
index 54dc3c1..3c4d6b2 100644
--- a/mali_kbase/jm/mali_kbase_jm_defs.h
+++ b/mali_kbase/jm/mali_kbase_jm_defs.h
@@ -847,7 +847,7 @@ struct kbase_jd_renderpass {
 * @max_priority: Max priority level allowed for this context.
 */
 struct kbase_jd_context {
-	struct mutex lock;
+	struct rt_mutex lock;
 	struct kbasep_js_kctx_info sched_info;
 	struct kbase_jd_atom atoms[BASE_JD_ATOM_COUNT];
 	struct kbase_jd_renderpass renderpasses[BASE_JD_RP_COUNT];
diff --git a/mali_kbase/jm/mali_kbase_js_defs.h b/mali_kbase/jm/mali_kbase_js_defs.h
index 652f383..c5cb9ea 100644
--- a/mali_kbase/jm/mali_kbase_js_defs.h
+++ b/mali_kbase/jm/mali_kbase_js_defs.h
@@ -329,7 +329,7 @@ struct kbasep_js_device_data {
 	u32 nr_contexts_pullable;
 	atomic_t nr_contexts_runnable;
 	atomic_t soft_job_timeout_ms;
-	struct mutex queue_mutex;
+	struct rt_mutex queue_mutex;
 	/*
 	 * Run Pool mutex, for managing contexts within the runpool.
 	 * Unless otherwise specified, you must hold this lock whilst accessing
@@ -370,7 +370,7 @@ struct kbasep_js_device_data {
 */
 struct kbasep_js_kctx_info {
 	struct kbase_jsctx {
-		struct mutex jsctx_mutex;
+		struct rt_mutex jsctx_mutex;
 
 		u32 nr_jobs;
 		u32 ctx_attr_ref_count[KBASEP_JS_CTX_ATTR_COUNT];
diff --git a/mali_kbase/mali_kbase_core_linux.c b/mali_kbase/mali_kbase_core_linux.c
index f9f7ccb..8cf6848 100644
--- a/mali_kbase/mali_kbase_core_linux.c
+++ b/mali_kbase/mali_kbase_core_linux.c
@@ -795,7 +795,7 @@ static int kbase_api_set_flags(struct kbase_file *kfile,
 		 */
 #else
 		js_kctx_info = &kctx->jctx.sched_info;
-		mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+		rt_mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
 		spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
 
 		/* Translate the flags */
 		if ((flags->create_flags &
@@ -804,7 +804,7 @@ static int kbase_api_set_flags(struct kbase_file *kfile,
 
 		spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock,
 					irq_flags);
-		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
 #endif
 	}
diff --git a/mali_kbase/mali_kbase_event.c b/mali_kbase/mali_kbase_event.c
index a884596..d58decb 100644
--- a/mali_kbase/mali_kbase_event.c
+++ b/mali_kbase/mali_kbase_event.c
@@ -87,9 +87,9 @@ int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *ueve
 	if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
 		kbase_jd_free_external_resources(atom);
 
-	mutex_lock(&ctx->jctx.lock);
+	rt_mutex_lock(&ctx->jctx.lock);
 	uevent->udata = kbase_event_process(ctx, atom);
-	mutex_unlock(&ctx->jctx.lock);
+	rt_mutex_unlock(&ctx->jctx.lock);
 
 	return 0;
 }
@@ -111,9 +111,9 @@ static void kbase_event_process_noreport_worker(struct kthread_work *data)
 	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
 		kbase_jd_free_external_resources(katom);
 
-	mutex_lock(&kctx->jctx.lock);
+	rt_mutex_lock(&kctx->jctx.lock);
 	kbase_event_process(kctx, katom);
-	mutex_unlock(&kctx->jctx.lock);
+	rt_mutex_unlock(&kctx->jctx.lock);
 }
 
 /**
diff --git a/mali_kbase/mali_kbase_jd.c b/mali_kbase/mali_kbase_jd.c
index 4e69069..96c475a 100644
--- a/mali_kbase/mali_kbase_jd.c
+++ b/mali_kbase/mali_kbase_jd.c
@@ -1451,7 +1451,7 @@ int kbase_jd_submit(struct kbase_context *kctx,
 		user_addr = (void __user *)((uintptr_t) user_addr + stride);
 
-		mutex_lock(&jctx->lock);
+		rt_mutex_lock(&jctx->lock);
 #ifndef compiletime_assert
 #define compiletime_assert_defined
 #define compiletime_assert(x, msg) do { switch (0) { case 0: case (x):; } } \
@@ -1476,7 +1476,7 @@ while (false)
 			/* Atom number is already in use, wait for the atom to
 			 * complete
 			 */
-			mutex_unlock(&jctx->lock);
+			rt_mutex_unlock(&jctx->lock);
 
 			/* This thread will wait for the atom to complete. Due
 			 * to thread scheduling we are not sure that the other
@@ -1495,7 +1495,7 @@ while (false)
 				 */
 				return 0;
 			}
-			mutex_lock(&jctx->lock);
+			rt_mutex_lock(&jctx->lock);
 		}
 		KBASE_TLSTREAM_TL_JD_SUBMIT_ATOM_START(kbdev, katom);
 		need_to_try_schedule_context |= jd_submit_atom(kctx, &user_atom,
@@ -1506,7 +1506,7 @@ while (false)
 		 */
 		kbase_disjoint_event_potential(kbdev);
 
-		mutex_unlock(&jctx->lock);
+		rt_mutex_unlock(&jctx->lock);
 	}
 
 	if (need_to_try_schedule_context)
@@ -1549,10 +1549,10 @@ void kbase_jd_done_worker(struct kthread_work *data)
 	/*
 	 * Begin transaction on JD context and JS context
 	 */
-	mutex_lock(&jctx->lock);
+	rt_mutex_lock(&jctx->lock);
 	KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(kbdev, katom, TL_ATOM_STATE_DONE);
-	mutex_lock(&js_devdata->queue_mutex);
-	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_lock(&js_devdata->queue_mutex);
+	rt_mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
 
 	/* This worker only gets called on contexts that are scheduled *in*. This is
 	 * because it only happens in response to an IRQ from a job that was
@@ -1565,8 +1565,8 @@ void kbase_jd_done_worker(struct kthread_work *data)
 		dev_dbg(kbdev->dev, "Atom %pK has been promoted to stopped\n",
 			(void *)katom);
-		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
-		mutex_unlock(&js_devdata->queue_mutex);
+		rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		rt_mutex_unlock(&js_devdata->queue_mutex);
 
 		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 
@@ -1576,7 +1576,7 @@ void kbase_jd_done_worker(struct kthread_work *data)
 		kbase_js_unpull(kctx, katom);
 
 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-		mutex_unlock(&jctx->lock);
+		rt_mutex_unlock(&jctx->lock);
 
 		return;
 	}
@@ -1596,8 +1596,8 @@ void kbase_jd_done_worker(struct kthread_work *data)
 	KBASE_DEBUG_ASSERT(kbasep_js_has_atom_finished(&katom_retained_state));
 
 	kbasep_js_remove_job(kbdev, kctx, katom);
-	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
-	mutex_unlock(&js_devdata->queue_mutex);
+	rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_unlock(&js_devdata->queue_mutex);
 
 	/* jd_done_nolock() requires the jsctx_mutex lock to be dropped */
 	jd_done_nolock(katom, false);
@@ -1607,7 +1607,7 @@ void kbase_jd_done_worker(struct kthread_work *data)
 		unsigned long flags;
 
 		context_idle = false;
-		mutex_lock(&js_devdata->queue_mutex);
+		rt_mutex_lock(&js_devdata->queue_mutex);
 		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 
 		/* If kbase_sched() has scheduled this context back in then
@@ -1647,13 +1647,13 @@ void kbase_jd_done_worker(struct kthread_work *data)
 			kbase_ctx_flag_set(kctx, KCTX_ACTIVE);
 		}
 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-		mutex_unlock(&js_devdata->queue_mutex);
+		rt_mutex_unlock(&js_devdata->queue_mutex);
 	}
 
 	/*
 	 * Transaction complete
 	 */
-	mutex_unlock(&jctx->lock);
+	rt_mutex_unlock(&jctx->lock);
 
 	/* Job is now no longer running, so can now safely release the context
 	 * reference, and handle any actions that were logged against the
@@ -1668,7 +1668,7 @@ void kbase_jd_done_worker(struct kthread_work *data)
 		/* If worker now idle then post all events that jd_done_nolock()
 		 * has queued
 		 */
-		mutex_lock(&jctx->lock);
+		rt_mutex_lock(&jctx->lock);
 		while (!list_empty(&kctx->completed_jobs)) {
 			struct kbase_jd_atom *atom = list_entry(
 					kctx->completed_jobs.next,
@@ -1677,7 +1677,7 @@ void kbase_jd_done_worker(struct kthread_work *data)
 			kbase_event_post(kctx, atom);
 		}
-		mutex_unlock(&jctx->lock);
+		rt_mutex_unlock(&jctx->lock);
 	}
 
 	kbase_backend_complete_wq_post_sched(kbdev, core_req);
@@ -1732,11 +1732,11 @@ static void jd_cancel_worker(struct kthread_work *data)
 	KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
 
 	/* Scheduler: Remove the job from the system */
-	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
 	attr_state_changed = kbasep_js_remove_cancelled_job(kbdev, kctx, katom);
-	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
 
-	mutex_lock(&jctx->lock);
+	rt_mutex_lock(&jctx->lock);
 
 	jd_done_nolock(katom, true);
 
 	/* Because we're zapping, we're not adding any more jobs to this ctx, so no need to
@@ -1746,7 +1746,7 @@ static void jd_cancel_worker(struct kthread_work *data)
 	KBASE_DEBUG_ASSERT(!need_to_try_schedule_context);
 
 	/* katom may have been freed now, do not use! */
-	mutex_unlock(&jctx->lock);
+	rt_mutex_unlock(&jctx->lock);
 
 	if (attr_state_changed)
 		kbase_js_sched_all(kbdev);
@@ -1846,7 +1846,7 @@ void kbase_jd_zap_context(struct kbase_context *kctx)
 	kbase_js_zap_context(kctx);
 
-	mutex_lock(&kctx->jctx.lock);
+	rt_mutex_lock(&kctx->jctx.lock);
 
 	/*
 	 * While holding the struct kbase_jd_context lock clean up jobs which are known to kbase but are
@@ -1864,7 +1864,7 @@ void kbase_jd_zap_context(struct kbase_context *kctx)
 	kbase_dma_fence_cancel_all_atoms(kctx);
 #endif
 
-	mutex_unlock(&kctx->jctx.lock);
+	rt_mutex_unlock(&kctx->jctx.lock);
 
 #ifdef CONFIG_MALI_DMA_FENCE
 	/* Flush dma-fence workqueue to ensure that any callbacks that may have
@@ -1912,7 +1912,7 @@ int kbase_jd_init(struct kbase_context *kctx)
 	for (i = 0; i < BASE_JD_RP_COUNT; i++)
 		kctx->jctx.renderpasses[i].state = KBASE_JD_RP_COMPLETE;
 
-	mutex_init(&kctx->jctx.lock);
+	rt_mutex_init(&kctx->jctx.lock);
 
 	init_waitqueue_head(&kctx->jctx.zero_jobs_wait);
diff --git a/mali_kbase/mali_kbase_jd_debugfs.c b/mali_kbase/mali_kbase_jd_debugfs.c
index 7cc082d..f9b41d5 100644
--- a/mali_kbase/mali_kbase_jd_debugfs.c
+++ b/mali_kbase/mali_kbase_jd_debugfs.c
@@ -168,7 +168,7 @@ static int kbasep_jd_debugfs_atoms_show(struct seq_file *sfile, void *data)
 	atoms = kctx->jctx.atoms;
 
 	/* General atom states */
-	mutex_lock(&kctx->jctx.lock);
+	rt_mutex_lock(&kctx->jctx.lock);
 	/* JS-related states */
 	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, irq_flags);
 	for (i = 0; i != BASE_JD_ATOM_COUNT; ++i) {
@@ -202,7 +202,7 @@ static int kbasep_jd_debugfs_atoms_show(struct seq_file *sfile, void *data)
 		seq_puts(sfile, "\n");
 	}
 	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, irq_flags);
-	mutex_unlock(&kctx->jctx.lock);
+	rt_mutex_unlock(&kctx->jctx.lock);
 
 	return 0;
 }
diff --git a/mali_kbase/mali_kbase_js.c b/mali_kbase/mali_kbase_js.c
index f1da59e..33af2da 100644
--- a/mali_kbase/mali_kbase_js.c
+++ b/mali_kbase/mali_kbase_js.c
@@ -599,7 +599,7 @@ int kbasep_js_devdata_init(struct kbase_device * const kbdev)
 	 */
 	mutex_init(&jsdd->runpool_mutex);
-	mutex_init(&jsdd->queue_mutex);
+	rt_mutex_init(&jsdd->queue_mutex);
 	sema_init(&jsdd->schedule_sem, 1);
 
 	for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i) {
@@ -662,7 +662,7 @@ int kbasep_js_kctx_init(struct kbase_context *const kctx)
 	/* On error, we could continue on: providing none of the below resources
 	 * rely on the ones above
 	 */
-	mutex_init(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_init(&js_kctx_info->ctx.jsctx_mutex);
 
 	init_waitqueue_head(&js_kctx_info->ctx.is_scheduled_wait);
 
@@ -692,8 +692,8 @@ void kbasep_js_kctx_term(struct kbase_context *kctx)
 	KBASE_DEBUG_ASSERT(!kbase_ctx_flag(kctx, KCTX_SCHEDULED));
 	KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs == 0);
 
-	mutex_lock(&kbdev->js_data.queue_mutex);
-	mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+	rt_mutex_lock(&kbdev->js_data.queue_mutex);
+	rt_mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
 
 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
@@ -707,8 +707,8 @@ void kbasep_js_kctx_term(struct kbase_context *kctx)
 		kbase_ctx_flag_clear(kctx, KCTX_RUNNABLE_REF);
 	}
 
-	mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
-	mutex_unlock(&kbdev->js_data.queue_mutex);
+	rt_mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+	rt_mutex_unlock(&kbdev->js_data.queue_mutex);
 
 	if (update_ctx_count) {
 		mutex_lock(&kbdev->js_data.runpool_mutex);
@@ -1522,8 +1522,8 @@ bool kbasep_js_add_job(struct kbase_context *kctx,
 	js_devdata = &kbdev->js_data;
 	js_kctx_info = &kctx->jctx.sched_info;
 
-	mutex_lock(&js_devdata->queue_mutex);
-	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_lock(&js_devdata->queue_mutex);
+	rt_mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
 
 	if (atom->core_req & BASE_JD_REQ_START_RENDERPASS)
 		err = js_add_start_rp(atom);
@@ -1639,9 +1639,9 @@ out_unlock:
 	dev_dbg(kbdev->dev, "Enqueue of kctx %pK is %srequired\n",
 		kctx, enqueue_required ? "" : "not ");
 
-	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
 
-	mutex_unlock(&js_devdata->queue_mutex);
+	rt_mutex_unlock(&js_devdata->queue_mutex);
 
 	return enqueue_required;
 }
@@ -1986,8 +1986,8 @@ void kbasep_js_runpool_release_ctx_and_katom_retained_state(
 	js_kctx_info = &kctx->jctx.sched_info;
 	js_devdata = &kbdev->js_data;
 
-	mutex_lock(&js_devdata->queue_mutex);
-	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_lock(&js_devdata->queue_mutex);
+	rt_mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
 	mutex_lock(&js_devdata->runpool_mutex);
 
 	release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
@@ -2001,8 +2001,8 @@ void kbasep_js_runpool_release_ctx_and_katom_retained_state(
 
 	/* Drop the jsctx_mutex to allow scheduling in a new context */
-	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
-	mutex_unlock(&js_devdata->queue_mutex);
+	rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_unlock(&js_devdata->queue_mutex);
 
 	if (release_result & KBASEP_JS_RELEASE_RESULT_SCHED_ALL)
 		kbase_js_sched_all(kbdev);
@@ -2038,7 +2038,7 @@ static void kbasep_js_runpool_release_ctx_no_schedule(
 	js_devdata = &kbdev->js_data;
 
 	kbasep_js_atom_retained_state_init_invalid(katom_retained_state);
 
-	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
 	mutex_lock(&js_devdata->runpool_mutex);
 
 	release_result = kbasep_js_runpool_release_ctx_internal(kbdev, kctx,
@@ -2050,7 +2050,7 @@ static void kbasep_js_runpool_release_ctx_no_schedule(
 		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, true);
 
 	/* Drop the jsctx_mutex to allow scheduling in a new context */
-	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
 
 	/* NOTE: could return release_result if the caller would like to know
	 * whether it should schedule a new context, but currently no callers do
@@ -2107,7 +2107,7 @@ static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev,
 	/*
 	 * Atomic transaction on the Context and Run Pool begins
 	 */
-	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
 	mutex_lock(&js_devdata->runpool_mutex);
 	mutex_lock(&kbdev->mmu_hw_mutex);
 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
@@ -2120,7 +2120,7 @@ static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev,
 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 		mutex_unlock(&kbdev->mmu_hw_mutex);
 		mutex_unlock(&js_devdata->runpool_mutex);
-		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
 
 		return false;
 	}
@@ -2140,7 +2140,7 @@ static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev,
 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 		mutex_unlock(&kbdev->mmu_hw_mutex);
 		mutex_unlock(&js_devdata->runpool_mutex);
-		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
 
 		return false;
 	}
@@ -2189,7 +2189,7 @@ static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev,
 		kbase_backend_ctx_count_changed(kbdev);
 	mutex_unlock(&js_devdata->runpool_mutex);
-	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
 
 	/* Note: after this point, the context could potentially get scheduled
	 * out immediately */
@@ -2272,8 +2272,8 @@ void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev,
 		return;
 #endif
 
-	mutex_lock(&js_devdata->queue_mutex);
-	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_lock(&js_devdata->queue_mutex);
+	rt_mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
 
 	/* Mark the context as privileged */
 	kbase_ctx_flag_set(kctx, KCTX_PRIVILEGED);
@@ -2287,8 +2287,8 @@ void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev,
 		/* Fast-starting requires the jsctx_mutex to be dropped,
 		 * because it works on multiple ctxs
 		 */
-		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
-		mutex_unlock(&js_devdata->queue_mutex);
+		rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		rt_mutex_unlock(&js_devdata->queue_mutex);
 
 		/* Try to schedule the context in */
 		kbase_js_sched_all(kbdev);
@@ -2301,8 +2301,8 @@ void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev,
 		 * corresponding address space
 		 */
 		WARN_ON(!kbase_ctx_sched_inc_refcount(kctx));
-		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
-		mutex_unlock(&js_devdata->queue_mutex);
+		rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		rt_mutex_unlock(&js_devdata->queue_mutex);
 	}
 }
 KBASE_EXPORT_TEST_API(kbasep_js_schedule_privileged_ctx);
@@ -2316,9 +2316,9 @@ void kbasep_js_release_privileged_ctx(struct kbase_device *kbdev,
 	js_kctx_info = &kctx->jctx.sched_info;
 
 	/* We don't need to use the address space anymore */
-	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
 	kbase_ctx_flag_clear(kctx, KCTX_PRIVILEGED);
-	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
 
 	/* Release the context - it will be scheduled out */
 	kbasep_js_runpool_release_ctx(kbdev, kctx);
@@ -2391,7 +2391,7 @@ void kbasep_js_resume(struct kbase_device *kbdev)
 	js_devdata = &kbdev->js_data;
 	KBASE_DEBUG_ASSERT(!kbase_pm_is_suspending(kbdev));
 
-	mutex_lock(&js_devdata->queue_mutex);
+	rt_mutex_lock(&js_devdata->queue_mutex);
 	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
 		for (prio = KBASE_JS_ATOM_SCHED_PRIO_FIRST;
 			prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
@@ -2413,7 +2413,7 @@ void kbasep_js_resume(struct kbase_device *kbdev)
 
 			js_kctx_info = &kctx->jctx.sched_info;
 
-			mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+			rt_mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
 			mutex_lock(&js_devdata->runpool_mutex);
 			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 
@@ -2430,7 +2430,7 @@ void kbasep_js_resume(struct kbase_device *kbdev)
 				kbase_backend_ctx_count_changed(kbdev);
 			mutex_unlock(&js_devdata->runpool_mutex);
-			mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+			rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
 
 			/* Take lock before accessing list again */
 			spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
@@ -2462,7 +2462,7 @@ void kbasep_js_resume(struct kbase_device *kbdev)
 #endif
 		}
 	}
-	mutex_unlock(&js_devdata->queue_mutex);
+	rt_mutex_unlock(&js_devdata->queue_mutex);
 
 	/* Restart atom processing */
 	kbase_js_sched_all(kbdev);
@@ -2984,8 +2984,8 @@ static void js_return_worker(struct kthread_work *data)
 
 	kbasep_js_atom_retained_state_copy(&retained_state, katom);
 
-	mutex_lock(&js_devdata->queue_mutex);
-	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_lock(&js_devdata->queue_mutex);
+	rt_mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
 
 	if (katom->event_code != BASE_JD_EVENT_END_RP_DONE)
 		atomic_dec(&katom->blocked);
@@ -3062,17 +3062,17 @@ static void js_return_worker(struct kthread_work *data)
 	if (timer_sync)
 		kbase_js_sync_timers(kbdev);
 
-	mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
-	mutex_unlock(&js_devdata->queue_mutex);
+	rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_unlock(&js_devdata->queue_mutex);
 
 	if (katom->core_req & BASE_JD_REQ_START_RENDERPASS) {
-		mutex_lock(&kctx->jctx.lock);
+		rt_mutex_lock(&kctx->jctx.lock);
 		js_return_of_start_rp(katom);
-		mutex_unlock(&kctx->jctx.lock);
+		rt_mutex_unlock(&kctx->jctx.lock);
 	} else if (katom->event_code == BASE_JD_EVENT_END_RP_DONE) {
-		mutex_lock(&kctx->jctx.lock);
+		rt_mutex_lock(&kctx->jctx.lock);
 		js_return_of_end_rp(katom);
-		mutex_unlock(&kctx->jctx.lock);
+		rt_mutex_unlock(&kctx->jctx.lock);
 	}
 
 	dev_dbg(kbdev->dev, "JS: retained state %s finished",
@@ -3616,7 +3616,7 @@ static bool kbase_js_defer_activate_for_slot(struct kbase_context *kctx, int js)
 	if (js != 1 || kbase_pm_is_active(kbdev))
 		return false;
 
-	mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+	rt_mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
 	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 	for (prio = KBASE_JS_ATOM_SCHED_PRIO_REALTIME;
		prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
@@ -3635,7 +3635,7 @@ static bool kbase_js_defer_activate_for_slot(struct kbase_context *kctx, int js)
 done:
 	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-	mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+	rt_mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
 	return ret;
 }
@@ -3655,7 +3655,7 @@ void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
 	js_devdata = &kbdev->js_data;
 
 	down(&js_devdata->schedule_sem);
-	mutex_lock(&js_devdata->queue_mutex);
+	rt_mutex_lock(&js_devdata->queue_mutex);
 
 	for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
 		last_active[js] = kbdev->hwaccess.active_kctx[js];
@@ -3676,11 +3676,11 @@ void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
 			if (first_deferred_ctx && kctx == first_deferred_ctx) {
 				if (!kbase_pm_is_active(kbdev)) {
-					mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+					rt_mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
 					if (kbase_js_ctx_list_add_pullable_head(
						kctx->kbdev, kctx, js))
 						kbase_js_sync_timers(kbdev);
-					mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+					rt_mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
 					/* Stop looking for new pullable work for this slot */
 					kctx = NULL;
 				} else {
@@ -3709,12 +3709,12 @@ void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
 					dev_dbg(kbdev->dev,
						"Deferring activation of kctx %pK for JS%d\n",
						(void *)kctx, js);
-					mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+					rt_mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
 					spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 					ctx_count_changed = kbase_js_ctx_list_add_pullable_nolock(
						kctx->kbdev, kctx, js);
 					spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-					mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+					rt_mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
 					if (ctx_count_changed)
 						kbase_backend_ctx_count_changed(kctx->kbdev);
 					if (!first_deferred_ctx)
@@ -3730,14 +3730,14 @@ void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
 					/* Suspend pending - return context to
					 * queue and stop scheduling */
-					mutex_lock(
+					rt_mutex_lock(
						&kctx->jctx.sched_info.ctx.jsctx_mutex);
 					if (kbase_js_ctx_list_add_pullable_head(
						kctx->kbdev, kctx, js))
 						kbase_js_sync_timers(kbdev);
-					mutex_unlock(
+					rt_mutex_unlock(
						&kctx->jctx.sched_info.ctx.jsctx_mutex);
-					mutex_unlock(&js_devdata->queue_mutex);
+					rt_mutex_unlock(&js_devdata->queue_mutex);
 					up(&js_devdata->schedule_sem);
 					KBASE_TLSTREAM_TL_JS_SCHED_END(kbdev, 0);
@@ -3750,7 +3750,7 @@ void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
 				}
 
 				if (!kbase_js_use_ctx(kbdev, kctx, js)) {
-					mutex_lock(
+					rt_mutex_lock(
						&kctx->jctx.sched_info.ctx.jsctx_mutex);
 
 					dev_dbg(kbdev->dev,
@@ -3769,7 +3769,7 @@ void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
						kctx->kbdev, kctx, js);
 					spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-					mutex_unlock(
+					rt_mutex_unlock(
						&kctx->jctx.sched_info.ctx.jsctx_mutex);
 					if (context_idle) {
 						WARN_ON(!kbase_ctx_flag(kctx, KCTX_ACTIVE));
@@ -3781,7 +3781,7 @@ void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
 					js_mask &= ~(1 << js);
 					break;
 				}
-				mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+				rt_mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
 				spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 
 				kbase_ctx_flag_clear(kctx, KCTX_PULLED);
@@ -3851,7 +3851,7 @@ void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
							&kbdev->hwaccess_lock, flags);
 					}
-					mutex_unlock(
+					rt_mutex_unlock(
						&kctx->jctx.sched_info.ctx.jsctx_mutex);
 
 					js_mask &= ~(1 << js);
@@ -3870,7 +3870,7 @@ void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
						kctx->kbdev, kctx, js);
 					spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-					mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+					rt_mutex_unlock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
 				}
 			}
@@ -3886,7 +3886,7 @@ void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
 		}
 	}
 
-	mutex_unlock(&js_devdata->queue_mutex);
+	rt_mutex_unlock(&js_devdata->queue_mutex);
 	up(&js_devdata->schedule_sem);
 	KBASE_TLSTREAM_TL_JS_SCHED_END(kbdev, 0);
 }
@@ -3907,9 +3907,9 @@ void kbase_js_zap_context(struct kbase_context *kctx)
 	 * - mark the context as dying
 	 * - try to evict it from the queue
 	 */
-	mutex_lock(&kctx->jctx.lock);
-	mutex_lock(&js_devdata->queue_mutex);
-	mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+	rt_mutex_lock(&kctx->jctx.lock);
+	rt_mutex_lock(&js_devdata->queue_mutex);
+	rt_mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
 	kbase_ctx_flag_set(kctx, KCTX_DYING);
 
 	dev_dbg(kbdev->dev, "Zap: Try Evict Ctx %pK", kctx);
@@ -3986,9 +3986,9 @@ void kbase_js_zap_context(struct kbase_context *kctx)
 		 */
 		kbasep_js_runpool_requeue_or_kill_ctx(kbdev, kctx, false);
 
-		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
-		mutex_unlock(&js_devdata->queue_mutex);
-		mutex_unlock(&kctx->jctx.lock);
+		rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		rt_mutex_unlock(&js_devdata->queue_mutex);
+		rt_mutex_unlock(&kctx->jctx.lock);
 	} else {
 		unsigned long flags;
@@ -4023,9 +4023,9 @@ void kbase_js_zap_context(struct kbase_context *kctx)
 		kbase_backend_jm_kill_running_jobs_from_kctx(kctx);
 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 
-		mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
-		mutex_unlock(&js_devdata->queue_mutex);
-		mutex_unlock(&kctx->jctx.lock);
+		rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+		rt_mutex_unlock(&js_devdata->queue_mutex);
+		rt_mutex_unlock(&kctx->jctx.lock);
 
 		dev_dbg(kbdev->dev,
			"Zap: Ctx %pK Release (may or may not schedule out immediately)",
			kctx);
diff --git a/mali_kbase/mali_kbase_mem.c b/mali_kbase/mali_kbase_mem.c
index b494cca..6562f01 100644
--- a/mali_kbase/mali_kbase_mem.c
+++ b/mali_kbase/mali_kbase_mem.c
@@ -3625,7 +3625,7 @@ static int kbase_jit_debugfs_used_get(struct kbase_jit_debugfs_data *data)
 	struct kbase_va_region *reg;
 
 #if !MALI_USE_CSF
-	mutex_lock(&kctx->jctx.lock);
+	rt_mutex_lock(&kctx->jctx.lock);
 #endif /* !MALI_USE_CSF */
 	mutex_lock(&kctx->jit_evict_lock);
 	list_for_each_entry(reg, &kctx->jit_active_head, jit_node) {
@@ -3633,7 +3633,7 @@ static int kbase_jit_debugfs_used_get(struct kbase_jit_debugfs_data *data)
 	}
 	mutex_unlock(&kctx->jit_evict_lock);
 #if !MALI_USE_CSF
-	mutex_unlock(&kctx->jctx.lock);
+	rt_mutex_unlock(&kctx->jctx.lock);
 #endif /* !MALI_USE_CSF */
 
 	return 0;
@@ -3652,7 +3652,7 @@ static int kbase_jit_debugfs_trim_get(struct kbase_jit_debugfs_data *data)
 	struct kbase_va_region *reg;
 
 #if !MALI_USE_CSF
-	mutex_lock(&kctx->jctx.lock);
+	rt_mutex_lock(&kctx->jctx.lock);
 #endif /* !MALI_USE_CSF */
 	kbase_gpu_vm_lock(kctx);
 	mutex_lock(&kctx->jit_evict_lock);
@@ -3673,7 +3673,7 @@ static int kbase_jit_debugfs_trim_get(struct kbase_jit_debugfs_data *data)
 	mutex_unlock(&kctx->jit_evict_lock);
 	kbase_gpu_vm_unlock(kctx);
 #if !MALI_USE_CSF
-	mutex_unlock(&kctx->jctx.lock);
+	rt_mutex_unlock(&kctx->jctx.lock);
 #endif /* !MALI_USE_CSF */
 
 	return 0;
diff --git a/mali_kbase/mali_kbase_softjobs.c b/mali_kbase/mali_kbase_softjobs.c
index 1f8f7d8..611a3b6 100644
--- a/mali_kbase/mali_kbase_softjobs.c
+++ b/mali_kbase/mali_kbase_softjobs.c
@@ -212,12 +212,12 @@ void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom)
 {
 	struct kbase_context *kctx = katom->kctx;
 
-	mutex_lock(&kctx->jctx.lock);
+	rt_mutex_lock(&kctx->jctx.lock);
 	kbasep_remove_waiting_soft_job(katom);
 	kbase_finish_soft_job(katom);
 	if (jd_done_nolock(katom, true))
 		kbase_js_sched_all(kctx->kbdev);
-	mutex_unlock(&kctx->jctx.lock);
+	rt_mutex_unlock(&kctx->jctx.lock);
 }
 #endif
 
@@ -228,9 +228,9 @@ static void kbasep_soft_event_complete_job(struct kthread_work *work)
 	struct kbase_context *kctx = katom->kctx;
 	int resched;
 
-	mutex_lock(&kctx->jctx.lock);
+	rt_mutex_lock(&kctx->jctx.lock);
 	resched = jd_done_nolock(katom, true);
-	mutex_unlock(&kctx->jctx.lock);
+	rt_mutex_unlock(&kctx->jctx.lock);
 
 	if (resched)
 		kbase_js_sched_all(kctx->kbdev);
@@ -359,9 +359,9 @@ static void kbase_fence_debug_wait_timeout_worker(struct kthread_work *work)
 	struct kbase_jd_atom *katom = w->katom;
 	struct kbase_context *kctx = katom->kctx;
 
-	mutex_lock(&kctx->jctx.lock);
+	rt_mutex_lock(&kctx->jctx.lock);
 	kbase_fence_debug_wait_timeout(katom);
-	mutex_unlock(&kctx->jctx.lock);
+	rt_mutex_unlock(&kctx->jctx.lock);
 
 	kfree(w);
 }
@@ -481,7 +481,7 @@ int kbase_soft_event_update(struct kbase_context *kctx,
 {
 	int err = 0;
 
-	mutex_lock(&kctx->jctx.lock);
+	rt_mutex_lock(&kctx->jctx.lock);
 
 	if (kbasep_write_soft_event_status(kctx, event, new_status)) {
 		err = -ENOENT;
@@ -492,7 +492,7 @@ int kbase_soft_event_update(struct kbase_context *kctx,
 		kbasep_complete_triggered_soft_events(kctx, event);
 
 out:
-	mutex_unlock(&kctx->jctx.lock);
+	rt_mutex_unlock(&kctx->jctx.lock);
 
 	return err;
 }
@@ -1355,10 +1355,10 @@ static void kbasep_jit_finish_worker(struct kthread_work *work)
 	struct kbase_context *kctx = katom->kctx;
 	int resched;
 
-	mutex_lock(&kctx->jctx.lock);
+	rt_mutex_lock(&kctx->jctx.lock);
 	kbase_finish_soft_job(katom);
 	resched = jd_done_nolock(katom, true);
-	mutex_unlock(&kctx->jctx.lock);
+	rt_mutex_unlock(&kctx->jctx.lock);
 
 	if (resched)
 		kbase_js_sched_all(kctx->kbdev);
@@ -1788,7 +1788,7 @@ void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev)
			&local_suspended_soft_jobs, dep_item[1]) {
 		struct kbase_context *kctx = katom_iter->kctx;
 
-		mutex_lock(&kctx->jctx.lock);
+		rt_mutex_lock(&kctx->jctx.lock);
 
 		/* Remove from the global list */
 		list_del(&katom_iter->dep_item[1]);
@@ -1802,7 +1802,7 @@ void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev)
 			atomic_dec(&kbdev->pm.gpu_users_waiting);
 #endif /* CONFIG_MALI_ARBITER_SUPPORT */
 		}
-		mutex_unlock(&kctx->jctx.lock);
+		rt_mutex_unlock(&kctx->jctx.lock);
 	}
 
 	if (resched)
diff --git a/mali_kbase/mmu/mali_kbase_mmu.c b/mali_kbase/mmu/mali_kbase_mmu.c
index 90af861..5b71f2b 100644
--- a/mali_kbase/mmu/mali_kbase_mmu.c
+++ b/mali_kbase/mmu/mali_kbase_mmu.c
@@ -776,7 +776,7 @@ void kbase_mmu_page_fault_worker(struct work_struct *data)
 
 #if MALI_JIT_PRESSURE_LIMIT_BASE
 #if !MALI_USE_CSF
-	mutex_lock(&kctx->jctx.lock);
+	rt_mutex_lock(&kctx->jctx.lock);
 #endif
 #endif
 
@@ -1198,7 +1198,7 @@ fault_done:
 		kbase_gpu_vm_unlock(kctx);
 	}
 #if !MALI_USE_CSF
-	mutex_unlock(&kctx->jctx.lock);
+	rt_mutex_unlock(&kctx->jctx.lock);
 #endif
 #endif
 
@@ -1964,9 +1964,9 @@ kbase_mmu_flush_invalidate(struct kbase_context *kctx, u64 vpfn, size_t nr,
 	kbdev = kctx->kbdev;
 
 #if !MALI_USE_CSF
-	mutex_lock(&kbdev->js_data.queue_mutex);
+	rt_mutex_lock(&kbdev->js_data.queue_mutex);
 	ctx_is_in_runpool = kbase_ctx_sched_inc_refcount(kctx);
-	mutex_unlock(&kbdev->js_data.queue_mutex);
+	rt_mutex_unlock(&kbdev->js_data.queue_mutex);
 #else
 	ctx_is_in_runpool = kbase_ctx_sched_inc_refcount_if_as_valid(kctx);
 #endif /* !MALI_USE_CSF */
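
Why rt_mutex here: kernel rt_mutexes implement priority inheritance, so a low-priority thread holding one of these job-submission locks is temporarily boosted while a higher-priority thread waits on it, instead of stalling that thread behind unrelated work. The userspace sketch below is an analogy only (not part of this patch), using the POSIX PTHREAD_PRIO_INHERIT protocol to show the same idea:

/* Analogy only: userspace priority inheritance via POSIX threads.
 * Kernel rt_mutex gives the converted kbase locks the same property.
 * Build with: cc -pthread pi_demo.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pi_lock;

int main(void)
{
	pthread_mutexattr_t attr;
	int err;

	pthread_mutexattr_init(&attr);
	/* A holder of pi_lock temporarily runs at the priority of the
	 * highest-priority thread blocked on it.
	 */
	err = pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
	if (err != 0) {
		fprintf(stderr, "setprotocol failed: %d\n", err);
		return 1;
	}
	pthread_mutex_init(&pi_lock, &attr);
	pthread_mutexattr_destroy(&attr);

	pthread_mutex_lock(&pi_lock);
	/* ... critical section: waiter priority is donated to us ... */
	pthread_mutex_unlock(&pi_lock);

	pthread_mutex_destroy(&pi_lock);
	return 0;
}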