author     Mattias Simonsson <mattiass@google.com>    2023-08-21 12:30:28 +0000
committer  Mattias Simonsson <mattiass@google.com>    2023-08-24 12:47:46 +0000
commit     e950fd2ae546b6e16bce788dfdc111465a75fe8e (patch)
tree       7e9cb8ed75e9cf6d3c1dd6ec931a97fd27c25a94
parent     1f55a83e420d0069045abfb4db2135ed5469f41f (diff)
mali_kbase: Use rt_mutex for scheduler lock
We use pm_runtime for handling suspends, which means that an internal Linux
power workqueue will take and hold the CSF scheduler lock while performing
suspend operations. This workqueue runs at default priority and so might not
be scheduled fast enough under high CPU load, which results in missed frames.

By promoting the CSF scheduler lock to an rt_mutex we ensure that the GPU
wakes up faster when there is work to be done, since our other internal
realtime threads will be waiting on the scheduler lock in these cases.

Bug: 296862283
Test: CUJ close camera
Change-Id: Ibe81ad7e26a75acb1640b2820902eb8144a48fdd
Merged-In: I0e81a45e3d65f60337250d35c53ef6447c876051
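For context, the sketch below shows the locking pattern this patch moves to. It is a minimal, hypothetical illustration rather than driver code: example_scheduler, example_scheduler_init and example_scheduler_do_work are made-up names, and only struct rt_mutex plus the rt_mutex_init()/rt_mutex_lock()/rt_mutex_unlock() calls from <linux/rtmutex.h> are the real kernel API the diff uses. The point of the switch is that an rt_mutex implements priority inheritance: a default-priority worker holding the scheduler lock is boosted while a realtime thread waits on it, so the lock is released sooner.

    #include <linux/rtmutex.h>

    /* Illustrative only: struct and function names below are hypothetical. */
    struct example_scheduler {
            struct rt_mutex lock;   /* was: struct mutex lock; */
    };

    static void example_scheduler_init(struct example_scheduler *sched)
    {
            /* An rt_mutex provides priority inheritance: while a
             * higher-priority (e.g. realtime) task blocks on the lock, the
             * current owner inherits that priority, so suspend work holding
             * the lock gets CPU time sooner even under heavy load.
             */
            rt_mutex_init(&sched->lock);
    }

    static void example_scheduler_do_work(struct example_scheduler *sched)
    {
            rt_mutex_lock(&sched->lock);
            /* ... manipulate scheduler state under the lock ... */
            rt_mutex_unlock(&sched->lock);
    }

Note also that the teardown path simply drops the mutex_destroy() call rather than adding an rt_mutex counterpart, as seen in the kbase_csf_scheduler_early_term() hunk below.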
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_policy.c          4
-rw-r--r--  mali_kbase/csf/mali_kbase_csf_defs.h                   2
-rw-r--r--  mali_kbase/csf/mali_kbase_csf_scheduler.c            141
-rw-r--r--  mali_kbase/csf/mali_kbase_csf_scheduler.h              4
-rw-r--r--  mali_kbase/csf/mali_kbase_csf_tiler_heap_reclaim.c     6
-rw-r--r--  mali_kbase/platform/pixel/pixel_gpu_sscd.c             6
-rw-r--r--  mali_kbase/tl/backend/mali_kbase_timeline_csf.c        4
7 files changed, 83 insertions, 84 deletions
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_policy.c b/mali_kbase/backend/gpu/mali_kbase_pm_policy.c
index 898d70f..f5dc008 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_policy.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_policy.c
@@ -322,7 +322,7 @@ void kbase_pm_set_policy(struct kbase_device *kbdev,
* check it here. So we hold the scheduler lock to avoid other operations
* interfering with the policy change and vice versa.
*/
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
/* policy_change_clamp_state_to_off, when needed, is set/cleared in
* this function, a very limited temporal scope for covering the
@@ -412,7 +412,7 @@ void kbase_pm_set_policy(struct kbase_device *kbdev,
/* Reverse the suspension done */
if (sched_suspend)
kbase_csf_scheduler_pm_resume_no_lock(kbdev);
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
if (reset_op_prevented)
kbase_reset_gpu_allow(kbdev);
diff --git a/mali_kbase/csf/mali_kbase_csf_defs.h b/mali_kbase/csf/mali_kbase_csf_defs.h
index b742f97..3bc3623 100644
--- a/mali_kbase/csf/mali_kbase_csf_defs.h
+++ b/mali_kbase/csf/mali_kbase_csf_defs.h
@@ -1097,7 +1097,7 @@ struct kbase_csf_mcu_shared_regions {
* CSG suspend buffers.
*/
struct kbase_csf_scheduler {
- struct mutex lock;
+ struct rt_mutex lock;
spinlock_t interrupt_lock;
enum kbase_csf_scheduler_state state;
DECLARE_BITMAP(doorbell_inuse_bitmap, CSF_NUM_DOORBELL);
diff --git a/mali_kbase/csf/mali_kbase_csf_scheduler.c b/mali_kbase/csf/mali_kbase_csf_scheduler.c
index 2b0952d..f21067f 100644
--- a/mali_kbase/csf/mali_kbase_csf_scheduler.c
+++ b/mali_kbase/csf/mali_kbase_csf_scheduler.c
@@ -244,11 +244,11 @@ void wait_for_dump_complete_on_group_deschedule(struct kbase_queue_group *group)
(scheduler->state == SCHED_BUSY)) &&
queue_group_scheduled_locked(group)) {
group->deschedule_deferred_cnt++;
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
rt_mutex_unlock(&kctx->csf.lock);
kbase_debug_csf_fault_wait_completion(kbdev);
rt_mutex_lock(&kctx->csf.lock);
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
group->deschedule_deferred_cnt--;
}
#endif
@@ -290,9 +290,9 @@ static void schedule_actions_trigger_df(struct kbase_device *kbdev,
return;
}
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
kbase_debug_csf_fault_wait_completion(kbdev);
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
WARN_ON(scheduler->state != SCHED_BUSY);
#endif
}
@@ -340,12 +340,12 @@ static int wait_for_scheduler_to_exit_sleep(struct kbase_device *kbdev)
remaining = kbase_csf_timeout_in_jiffies(sleep_exit_wait_time);
while ((scheduler->state == SCHED_SLEEPING) && !ret) {
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
remaining = wait_event_timeout(
kbdev->csf.event_wait,
(scheduler->state != SCHED_SLEEPING),
remaining);
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
if (!remaining && (scheduler->state == SCHED_SLEEPING))
ret = -ETIMEDOUT;
}
@@ -596,10 +596,10 @@ static void scheduler_doorbell_init(struct kbase_device *kbdev)
bitmap_zero(kbdev->csf.scheduler.doorbell_inuse_bitmap,
CSF_NUM_DOORBELL);
- mutex_lock(&kbdev->csf.scheduler.lock);
+ rt_mutex_lock(&kbdev->csf.scheduler.lock);
/* Reserve doorbell 0 for use by kernel driver */
doorbell_nr = acquire_doorbell(kbdev);
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
WARN_ON(doorbell_nr != CSF_KERNEL_DOORBELL_NR);
}
@@ -1504,7 +1504,7 @@ retry:
/* Update the group state so that it can get scheduled soon */
update_idle_suspended_group_state(group);
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
/* This function is called when the queue group is either not on a CSG
* slot or is on the slot but undergoing transition.
@@ -1527,7 +1527,7 @@ retry:
kbdev->csf.event_wait, can_halt_stream(kbdev, group),
kbase_csf_timeout_in_jiffies(group_schedule_timeout));
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
if (remaining && queue_group_scheduled_locked(group)) {
slot = kbase_csf_scheduler_group_get_slot(group);
@@ -1660,7 +1660,7 @@ int kbase_csf_scheduler_queue_stop(struct kbase_queue *queue)
kbase_reset_gpu_assert_failed_or_prevented(kbdev);
lockdep_assert_held(&queue->kctx->csf.lock);
- mutex_lock(&kbdev->csf.scheduler.lock);
+ rt_mutex_lock(&kbdev->csf.scheduler.lock);
queue->enabled = false;
KBASE_KTRACE_ADD_CSF_GRP_Q(kbdev, CSI_STOP, group, queue, cs_enabled);
@@ -1685,7 +1685,7 @@ int kbase_csf_scheduler_queue_stop(struct kbase_queue *queue)
kbase_csf_mcu_shared_drop_stopped_queue(kbdev, queue);
}
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
KBASE_KTRACE_ADD_CSF_GRP_Q(kbdev, QUEUE_STOP, group, queue, group->run_state);
return err;
}
@@ -1933,11 +1933,11 @@ int kbase_csf_scheduler_queue_start(struct kbase_queue *queue)
if (WARN_ON(!group || queue->bind_state != KBASE_CSF_QUEUE_BOUND))
return -EINVAL;
- mutex_lock(&kbdev->csf.scheduler.lock);
+ rt_mutex_lock(&kbdev->csf.scheduler.lock);
#if IS_ENABLED(CONFIG_DEBUG_FS)
if (unlikely(kbdev->csf.scheduler.state == SCHED_BUSY)) {
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
return -EBUSY;
}
#endif
@@ -2018,7 +2018,7 @@ int kbase_csf_scheduler_queue_start(struct kbase_queue *queue)
*/
if (unlikely(err)) {
queue->enabled = cs_enabled;
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
return (err != -EIO) ? -EBUSY : err;
}
#endif
@@ -2030,7 +2030,7 @@ int kbase_csf_scheduler_queue_start(struct kbase_queue *queue)
}
}
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
if (evicted)
kbase_csf_term_descheduled_queue_group(group);
@@ -3291,7 +3291,7 @@ void kbase_csf_scheduler_group_deschedule(struct kbase_queue_group *group)
kbase_reset_gpu_assert_failed_or_prevented(kbdev);
lockdep_assert_held(&group->kctx->csf.lock);
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
KBASE_KTRACE_ADD_CSF_GRP(kbdev, GROUP_DESCHEDULE, group, group->run_state);
wait_for_dump_complete_on_group_deschedule(group);
@@ -3345,7 +3345,7 @@ void kbase_csf_scheduler_group_deschedule(struct kbase_queue_group *group)
WARN_ON(queue_group_scheduled_locked(group));
unlock:
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
}
/**
@@ -4121,7 +4121,7 @@ void kbase_csf_scheduler_evict_ctx_slots(struct kbase_device *kbdev,
DECLARE_BITMAP(slot_mask, MAX_SUPPORTED_CSGS) = {0};
lockdep_assert_held(&kctx->csf.lock);
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
/* This code is only called during reset, so we don't wait for the CSG
* slots to be stopped
@@ -4161,7 +4161,7 @@ void kbase_csf_scheduler_evict_ctx_slots(struct kbase_device *kbdev,
*/
kbase_event_wakeup_nosync(kctx);
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
KBASE_KTRACE_ADD(kbdev, SCHEDULER_EVICT_CTX_SLOTS_END, kctx, num_groups);
}
@@ -5209,7 +5209,7 @@ static void gpu_idle_worker(struct work_struct *work)
return;
}
kbase_debug_csf_fault_wait_completion(kbdev);
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
if (!scheduler->gpu_idle_work_pending)
@@ -5220,7 +5220,7 @@ static void gpu_idle_worker(struct work_struct *work)
#if IS_ENABLED(CONFIG_DEBUG_FS)
if (unlikely(scheduler->state == SCHED_BUSY)) {
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
kbase_reset_gpu_allow(kbdev);
return;
}
@@ -5244,7 +5244,7 @@ static void gpu_idle_worker(struct work_struct *work)
#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
unlock:
#endif
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
kbase_reset_gpu_allow(kbdev);
KBASE_KTRACE_ADD(kbdev, SCHEDULER_GPU_IDLE_WORKER_END, NULL,
__ENCODE_KTRACE_INFO(false, scheduler_is_idle_suspendable,
@@ -5482,7 +5482,7 @@ static void sc_rails_off_worker(struct work_struct *work)
return;
}
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
/* All the previously sent CSG/CSI level requests are expected to have
* completed at this point.
*/
@@ -5504,7 +5504,7 @@ static void sc_rails_off_worker(struct work_struct *work)
ack_gpu_idle_event(kbdev);
}
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
kbase_reset_gpu_allow(kbdev);
KBASE_KTRACE_ADD(kbdev, SCHEDULER_EXIT_SC_RAIL, NULL,
kbase_csf_ktrace_gpu_cycle_cnt(kbdev));
@@ -6048,7 +6048,7 @@ static void schedule_on_tock(struct kthread_work *work)
return;
kbase_debug_csf_fault_wait_completion(kbdev);
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
if (can_skip_scheduling(kbdev))
{
atomic_set(&scheduler->pending_tock_work, false);
@@ -6071,7 +6071,7 @@ static void schedule_on_tock(struct kthread_work *work)
KBASE_KTRACE_ADD(kbdev, SCHED_INACTIVE, NULL, scheduler->state);
if (!scheduler->total_runnable_grps)
enqueue_gpu_idle_work(scheduler, 0);
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
kbase_reset_gpu_allow(kbdev);
dev_dbg(kbdev->dev,
@@ -6081,7 +6081,7 @@ static void schedule_on_tock(struct kthread_work *work)
return;
exit_no_schedule_unlock:
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
kbase_reset_gpu_allow(kbdev);
}
@@ -6099,7 +6099,7 @@ static void schedule_on_tick(struct kthread_work *work)
return;
kbase_debug_csf_fault_wait_completion(kbdev);
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
if (can_skip_scheduling(kbdev))
goto exit_no_schedule_unlock;
@@ -6126,7 +6126,7 @@ static void schedule_on_tick(struct kthread_work *work)
}
scheduler->state = SCHED_INACTIVE;
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
KBASE_KTRACE_ADD(kbdev, SCHED_INACTIVE, NULL, scheduler->state);
kbase_reset_gpu_allow(kbdev);
@@ -6137,7 +6137,7 @@ static void schedule_on_tick(struct kthread_work *work)
return;
exit_no_schedule_unlock:
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
kbase_reset_gpu_allow(kbdev);
}
@@ -6172,7 +6172,7 @@ static int suspend_active_queue_groups_on_reset(struct kbase_device *kbdev)
int ret;
int ret2;
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
ret = suspend_active_queue_groups(kbdev, slot_mask);
@@ -6210,7 +6210,7 @@ static int suspend_active_queue_groups_on_reset(struct kbase_device *kbdev)
ret = ret2;
}
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
return ret;
}
@@ -6247,7 +6247,7 @@ static bool scheduler_handle_reset_in_protected_mode(struct kbase_device *kbdev)
unsigned long flags;
u32 csg_nr;
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
spin_lock_irqsave(&scheduler->interrupt_lock, flags);
protm_grp = scheduler->active_protm_grp;
@@ -6319,7 +6319,7 @@ static bool scheduler_handle_reset_in_protected_mode(struct kbase_device *kbdev)
}
unlock:
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
return suspend_on_slot_groups;
}
@@ -6353,7 +6353,7 @@ static void scheduler_inner_reset(struct kbase_device *kbdev)
cancel_tock_work(scheduler);
cancel_delayed_work_sync(&scheduler->ping_work);
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
spin_lock_irqsave(&scheduler->interrupt_lock, flags);
bitmap_fill(scheduler->csgs_events_enable_mask, MAX_SUPPORTED_CSGS);
@@ -6373,7 +6373,7 @@ static void scheduler_inner_reset(struct kbase_device *kbdev)
scheduler->num_active_address_spaces |
(((u64)scheduler->total_runnable_grps) << 32));
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
}
void kbase_csf_scheduler_reset(struct kbase_device *kbdev)
@@ -6451,7 +6451,7 @@ static void firmware_aliveness_monitor(struct work_struct *work)
return;
}
- mutex_lock(&kbdev->csf.scheduler.lock);
+ rt_mutex_lock(&kbdev->csf.scheduler.lock);
#ifdef CONFIG_MALI_DEBUG
if (fw_debug) {
@@ -6495,7 +6495,7 @@ static void firmware_aliveness_monitor(struct work_struct *work)
kbase_pm_context_idle(kbdev);
exit:
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
kbase_reset_gpu_allow(kbdev);
}
@@ -6510,7 +6510,7 @@ int kbase_csf_scheduler_group_copy_suspend_buf(struct kbase_queue_group *group,
kbase_reset_gpu_assert_prevented(kbdev);
lockdep_assert_held(&kctx->csf.lock);
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
on_slot = kbasep_csf_scheduler_group_is_on_slot_locked(group);
@@ -6619,7 +6619,7 @@ int kbase_csf_scheduler_group_copy_suspend_buf(struct kbase_queue_group *group,
}
exit:
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
return err;
}
@@ -6751,7 +6751,7 @@ void kbase_csf_scheduler_group_protm_enter(struct kbase_queue_group *group)
if (err)
return;
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
if (group->run_state == KBASE_CSF_GROUP_IDLE) {
group->run_state = KBASE_CSF_GROUP_RUNNABLE;
@@ -6762,7 +6762,7 @@ void kbase_csf_scheduler_group_protm_enter(struct kbase_queue_group *group)
if (scheduler_get_protm_enter_async_group(kbdev, group))
scheduler_group_check_protm_enter(kbdev, group);
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
kbase_reset_gpu_allow(kbdev);
}
@@ -7004,13 +7004,13 @@ static void check_group_sync_update_worker(struct kthread_work *work)
struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
bool sync_updated = false;
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
#if IS_ENABLED(CONFIG_DEBUG_FS)
if (unlikely(scheduler->state == SCHED_BUSY)) {
kthread_queue_work(&kctx->csf.sched.sync_update_worker,
&kctx->csf.sched.sync_update_work);
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
return;
}
#endif
@@ -7064,7 +7064,7 @@ static void check_group_sync_update_worker(struct kthread_work *work)
KBASE_KTRACE_ADD(kbdev, SCHEDULER_GROUP_SYNC_UPDATE_WORKER_END, kctx, 0u);
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
}
static
@@ -7185,7 +7185,7 @@ int kbase_csf_scheduler_early_init(struct kbase_device *kbdev)
INIT_DEFERRABLE_WORK(&scheduler->ping_work, firmware_aliveness_monitor);
- mutex_init(&scheduler->lock);
+ rt_mutex_init(&scheduler->lock);
spin_lock_init(&scheduler->interrupt_lock);
/* Internal lists */
@@ -7246,7 +7246,7 @@ void kbase_csf_scheduler_term(struct kbase_device *kbdev)
#else
flush_work(&kbdev->csf.scheduler.gpu_idle_work);
#endif
- mutex_lock(&kbdev->csf.scheduler.lock);
+ rt_mutex_lock(&kbdev->csf.scheduler.lock);
if (kbdev->csf.scheduler.state != SCHED_SUSPENDED) {
unsigned long flags;
@@ -7259,7 +7259,7 @@ void kbase_csf_scheduler_term(struct kbase_device *kbdev)
scheduler_suspend(kbdev);
}
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
cancel_delayed_work_sync(&kbdev->csf.scheduler.ping_work);
cancel_tick_timer(kbdev);
cancel_tick_work(&kbdev->csf.scheduler);
@@ -7281,7 +7281,6 @@ void kbase_csf_scheduler_early_term(struct kbase_device *kbdev)
kbase_destroy_kworker_stack(&kbdev->csf.scheduler.csf_worker);
kbase_csf_tiler_heap_reclaim_mgr_term(kbdev);
- mutex_destroy(&kbdev->csf.scheduler.lock);
}
/**
@@ -7317,9 +7316,9 @@ static void scheduler_enable_tick_timer_nolock(struct kbase_device *kbdev)
void kbase_csf_scheduler_enable_tick_timer(struct kbase_device *kbdev)
{
- mutex_lock(&kbdev->csf.scheduler.lock);
+ rt_mutex_lock(&kbdev->csf.scheduler.lock);
scheduler_enable_tick_timer_nolock(kbdev);
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
}
bool kbase_csf_scheduler_timer_is_enabled(struct kbase_device *kbdev)
@@ -7327,9 +7326,9 @@ bool kbase_csf_scheduler_timer_is_enabled(struct kbase_device *kbdev)
struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;
bool enabled;
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
enabled = scheduler_timer_is_enabled_nolock(kbdev);
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
return enabled;
}
@@ -7340,13 +7339,13 @@ void kbase_csf_scheduler_timer_set_enabled(struct kbase_device *kbdev,
struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
bool currently_enabled;
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
currently_enabled = scheduler_timer_is_enabled_nolock(kbdev);
if (currently_enabled && !enable) {
scheduler->timer_enabled = false;
cancel_tick_timer(kbdev);
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
/* The non-sync version to cancel the normal work item is not
* available, so need to drop the lock before cancellation.
*/
@@ -7361,14 +7360,14 @@ void kbase_csf_scheduler_timer_set_enabled(struct kbase_device *kbdev,
scheduler_enable_tick_timer_nolock(kbdev);
}
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
}
void kbase_csf_scheduler_kick(struct kbase_device *kbdev)
{
struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
if (unlikely(scheduler_timer_is_enabled_nolock(kbdev)))
goto out;
@@ -7379,7 +7378,7 @@ void kbase_csf_scheduler_kick(struct kbase_device *kbdev)
}
out:
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
}
int kbase_csf_scheduler_pm_suspend_no_lock(struct kbase_device *kbdev)
@@ -7438,10 +7437,10 @@ int kbase_csf_scheduler_pm_suspend(struct kbase_device *kbdev)
return result;
}
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
result = kbase_csf_scheduler_pm_suspend_no_lock(kbdev);
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
kbase_reset_gpu_allow(kbdev);
@@ -7463,9 +7462,9 @@ void kbase_csf_scheduler_pm_resume_no_lock(struct kbase_device *kbdev)
void kbase_csf_scheduler_pm_resume(struct kbase_device *kbdev)
{
- mutex_lock(&kbdev->csf.scheduler.lock);
+ rt_mutex_lock(&kbdev->csf.scheduler.lock);
kbase_csf_scheduler_pm_resume_no_lock(kbdev);
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
}
KBASE_EXPORT_TEST_API(kbase_csf_scheduler_pm_resume);
@@ -7475,10 +7474,10 @@ void kbase_csf_scheduler_pm_active(struct kbase_device *kbdev)
* callback function, which may need to wake up the MCU for suspending
* the CSGs before powering down the GPU.
*/
- mutex_lock(&kbdev->csf.scheduler.lock);
+ rt_mutex_lock(&kbdev->csf.scheduler.lock);
scheduler_pm_active_handle_suspend(kbdev,
KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE);
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
}
KBASE_EXPORT_TEST_API(kbase_csf_scheduler_pm_active);
@@ -7487,9 +7486,9 @@ void kbase_csf_scheduler_pm_idle(struct kbase_device *kbdev)
/* Here the lock is taken just to maintain symmetry with
* kbase_csf_scheduler_pm_active().
*/
- mutex_lock(&kbdev->csf.scheduler.lock);
+ rt_mutex_lock(&kbdev->csf.scheduler.lock);
scheduler_pm_idle(kbdev);
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
}
KBASE_EXPORT_TEST_API(kbase_csf_scheduler_pm_idle);
@@ -7594,11 +7593,11 @@ void kbase_csf_scheduler_force_sleep(struct kbase_device *kbdev)
{
struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
if (kbase_pm_gpu_sleep_allowed(kbdev) &&
(scheduler->state == SCHED_INACTIVE))
scheduler_sleep_on_idle(kbdev);
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
}
#endif
@@ -7606,7 +7605,7 @@ void kbase_csf_scheduler_force_wakeup(struct kbase_device *kbdev)
{
struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
- mutex_lock(&scheduler->lock);
+ rt_mutex_lock(&scheduler->lock);
scheduler_wakeup(kbdev, true);
- mutex_unlock(&scheduler->lock);
+ rt_mutex_unlock(&scheduler->lock);
}
diff --git a/mali_kbase/csf/mali_kbase_csf_scheduler.h b/mali_kbase/csf/mali_kbase_csf_scheduler.h
index 17c025b..4062d78 100644
--- a/mali_kbase/csf/mali_kbase_csf_scheduler.h
+++ b/mali_kbase/csf/mali_kbase_csf_scheduler.h
@@ -276,7 +276,7 @@ int kbase_csf_scheduler_group_copy_suspend_buf(struct kbase_queue_group *group,
*/
static inline void kbase_csf_scheduler_lock(struct kbase_device *kbdev)
{
- mutex_lock(&kbdev->csf.scheduler.lock);
+ rt_mutex_lock(&kbdev->csf.scheduler.lock);
}
/**
@@ -286,7 +286,7 @@ static inline void kbase_csf_scheduler_lock(struct kbase_device *kbdev)
*/
static inline void kbase_csf_scheduler_unlock(struct kbase_device *kbdev)
{
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
}
/**
diff --git a/mali_kbase/csf/mali_kbase_csf_tiler_heap_reclaim.c b/mali_kbase/csf/mali_kbase_csf_tiler_heap_reclaim.c
index 92641d8..c12ed65 100644
--- a/mali_kbase/csf/mali_kbase_csf_tiler_heap_reclaim.c
+++ b/mali_kbase/csf/mali_kbase_csf_tiler_heap_reclaim.c
@@ -270,13 +270,13 @@ static unsigned long kbase_csf_tiler_heap_reclaim_scan_free_pages(struct kbase_d
unsigned long avail = 0;
/* If Scheduler is busy in action, return 0 */
- if (!mutex_trylock(&kbdev->csf.scheduler.lock)) {
+ if (!rt_mutex_trylock(&kbdev->csf.scheduler.lock)) {
struct kbase_csf_scheduler *const scheduler = &kbdev->csf.scheduler;
/* Wait for roughly 2-ms */
wait_event_timeout(kbdev->csf.event_wait, (scheduler->state != SCHED_BUSY),
msecs_to_jiffies(2));
- if (!mutex_trylock(&kbdev->csf.scheduler.lock)) {
+ if (!rt_mutex_trylock(&kbdev->csf.scheduler.lock)) {
dev_dbg(kbdev->dev, "Tiler heap reclaim scan see device busy (freed: 0)");
return 0;
}
@@ -286,7 +286,7 @@ static unsigned long kbase_csf_tiler_heap_reclaim_scan_free_pages(struct kbase_d
if (avail)
freed = reclaim_unused_heap_pages(kbdev);
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
#if (KERNEL_VERSION(4, 14, 0) <= LINUX_VERSION_CODE)
if (freed > sc->nr_to_scan)
diff --git a/mali_kbase/platform/pixel/pixel_gpu_sscd.c b/mali_kbase/platform/pixel/pixel_gpu_sscd.c
index 6a284fa..c65e6ce 100644
--- a/mali_kbase/platform/pixel/pixel_gpu_sscd.c
+++ b/mali_kbase/platform/pixel/pixel_gpu_sscd.c
@@ -313,7 +313,7 @@ static int get_and_init_contexts(struct kbase_device *kbdev,
size_t entry_idx;
int rc;
- if (!mutex_trylock(&kbdev->csf.scheduler.lock)) {
+ if (!rt_mutex_trylock(&kbdev->csf.scheduler.lock)) {
dev_warn(kbdev->dev, "could not lock scheduler during dump.");
return -EBUSY;
}
@@ -321,7 +321,7 @@ static int get_and_init_contexts(struct kbase_device *kbdev,
num_entries = bitmap_weight(scheduler->csg_inuse_bitmap, num_csg);
rc = pixel_context_snapshot_init(kbdev, segment, num_entries);
if (rc) {
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
return rc;
}
context_snapshot = segment->addr;
@@ -351,7 +351,7 @@ static int get_and_init_contexts(struct kbase_device *kbdev,
}
}
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
return 0;
}
#endif
diff --git a/mali_kbase/tl/backend/mali_kbase_timeline_csf.c b/mali_kbase/tl/backend/mali_kbase_timeline_csf.c
index a6062f1..e96e05b 100644
--- a/mali_kbase/tl/backend/mali_kbase_timeline_csf.c
+++ b/mali_kbase/tl/backend/mali_kbase_timeline_csf.c
@@ -84,7 +84,7 @@ void kbase_create_timeline_objects(struct kbase_device *kbdev)
* stream tracepoints are emitted to ensure we don't change the
* scheduler until after then
*/
- mutex_lock(&kbdev->csf.scheduler.lock);
+ rt_mutex_lock(&kbdev->csf.scheduler.lock);
for (slot_i = 0; slot_i < kbdev->csf.global_iface.group_num; slot_i++) {
@@ -105,7 +105,7 @@ void kbase_create_timeline_objects(struct kbase_device *kbdev)
*/
kbase_timeline_streams_body_reset(timeline);
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
/* For each context in the device... */
list_for_each_entry(kctx, &timeline->tl_kctx_list, tl_kctx_list_node) {