author    Mattias Simonsson <mattiass@google.com>  2023-08-23 12:49:27 +0000
committer Mattias Simonsson <mattiass@google.com>  2023-08-31 11:19:08 +0000
commit    46edf1b5965d872c5f8a09c6dc3dcbff58f78a92 (patch)
tree      33a8dbcd4f198b81c8cd92de2356cd7d1135a000
parent    43ed6d1d9e76b0cfea65a825a33c92ead0e7d6aa (diff)
download  gpu-46edf1b5965d872c5f8a09c6dc3dcbff58f78a92.tar.gz
mali_kbase: Use kthread for protm_event_worker
protm_event_worker is responsible for triggering protected mode entry, but it currently runs in a default-priority workqueue, which results in occasional long stalls for protected mode work. The workload of this task is very light, typically running for only a few tens of microseconds per invocation, so converting it to a kthread should have no significant impact on overall system performance.

Bug: 297019294
Test: Manual Netflix PiP perfetto trace inspection
Change-Id: Ica9e744bd390ae6b88a701bf2d390f82937581d0
-rw-r--r--  mali_kbase/csf/mali_kbase_csf.c       36
-rw-r--r--  mali_kbase/csf/mali_kbase_csf_defs.h   4
2 files changed, 27 insertions, 13 deletions
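
For context on the pattern the patch below adopts: the upstream kthread_worker API replaces deferral to a shared workqueue with a dedicated kernel thread whose scheduling the driver controls, which is what removes the priority stalls described in the commit message. A minimal sketch of the conversion follows; it illustrates only the generic kernel API, and the example_* names are hypothetical, not Mali driver code.

/*
 * Minimal sketch of the upstream kthread_worker pattern this commit
 * adopts. Generic illustration only; the example_* names are
 * hypothetical and do not exist in the Mali driver.
 */
#include <linux/err.h>
#include <linux/kthread.h>

static struct kthread_worker example_worker;
static struct kthread_work example_work;
static struct task_struct *example_task;

static void example_work_fn(struct kthread_work *work)
{
	/* Lightweight handler; runs on the dedicated worker thread. */
}

static int example_init(void)
{
	kthread_init_worker(&example_worker);
	kthread_init_work(&example_work, example_work_fn);

	/* kthread_worker_fn() loops on the worker, draining queued items. */
	example_task = kthread_run(kthread_worker_fn, &example_worker,
				   "example_worker");
	if (IS_ERR(example_task))
		return PTR_ERR(example_task);

	/* Deferral now targets the dedicated thread, not a workqueue. */
	kthread_queue_work(&example_worker, &example_work);
	return 0;
}

static void example_exit(void)
{
	kthread_cancel_work_sync(&example_work);
	kthread_flush_worker(&example_worker);
	kthread_stop(example_task);
}
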
diff --git a/mali_kbase/csf/mali_kbase_csf.c b/mali_kbase/csf/mali_kbase_csf.c
index 239d9de..2e3ced3 100644
--- a/mali_kbase/csf/mali_kbase_csf.c
+++ b/mali_kbase/csf/mali_kbase_csf.c
@@ -1148,7 +1148,7 @@ static int create_normal_suspend_buffer(struct kbase_context *const kctx,
}
static void timer_event_worker(struct work_struct *data);
-static void protm_event_worker(struct work_struct *data);
+static void protm_event_worker(struct kthread_work *work);
static void term_normal_suspend_buffer(struct kbase_context *const kctx,
struct kbase_normal_suspend_buffer *s_buf);
@@ -1259,7 +1259,7 @@ static int create_queue_group(struct kbase_context *const kctx,
INIT_LIST_HEAD(&group->error_timeout.link);
INIT_LIST_HEAD(&group->error_tiler_oom.link);
INIT_WORK(&group->timer_event_work, timer_event_worker);
- INIT_WORK(&group->protm_event_work, protm_event_worker);
+ kthread_init_work(&group->protm_event_work, protm_event_worker);
bitmap_zero(group->protm_pending_bitmap,
MAX_SUPPORTED_STREAMS_PER_GROUP);
@@ -1501,7 +1501,7 @@ static void wait_group_deferred_deschedule_completion(struct kbase_queue_group *
static void cancel_queue_group_events(struct kbase_queue_group *group)
{
cancel_work_sync(&group->timer_event_work);
- cancel_work_sync(&group->protm_event_work);
+ kthread_cancel_work_sync(&group->protm_event_work);
}
static void remove_pending_group_fatal_error(struct kbase_queue_group *group)
@@ -1688,7 +1688,14 @@ int kbase_csf_ctx_init(struct kbase_context *kctx)
&kctx->csf.pending_submission_worker, "mali_submit");
if (err) {
dev_err(kctx->kbdev->dev, "error initializing pending submission worker thread");
- goto out_err_kthread;
+ goto out_err_submission_kthread;
+ }
+
+ err = kbase_create_realtime_thread(kctx->kbdev, kthread_worker_fn,
+ &kctx->csf.protm_event_worker, "mali_protm_event");
+ if (err) {
+ dev_err(kctx->kbdev->dev, "error initializing protm event worker thread");
+ goto out_err_protm_kthread;
}
err = kbase_csf_scheduler_context_init(kctx);
@@ -1720,8 +1727,10 @@ out_err_tiler_heap_context:
out_err_kcpu_queue_context:
kbase_csf_scheduler_context_term(kctx);
out_err_scheduler_context:
+ kbase_destroy_kworker_stack(&kctx->csf.protm_event_worker);
+out_err_protm_kthread:
kbase_destroy_kworker_stack(&kctx->csf.pending_submission_worker);
-out_err_kthread:
+out_err_submission_kthread:
destroy_workqueue(kctx->csf.wq);
out:
return err;
@@ -1878,6 +1887,7 @@ void kbase_csf_ctx_term(struct kbase_context *kctx)
rt_mutex_unlock(&kctx->csf.lock);
kbase_destroy_kworker_stack(&kctx->csf.pending_submission_worker);
+ kbase_destroy_kworker_stack(&kctx->csf.protm_event_worker);
kbasep_ctx_user_reg_page_mapping_term(kctx);
kbase_csf_tiler_heap_context_term(kctx);
@@ -2303,16 +2313,16 @@ static void report_group_fatal_error(struct kbase_queue_group *const group)
/**
* protm_event_worker - Protected mode switch request event handler
- * called from a workqueue.
+ * called from a kthread.
*
- * @data: Pointer to a work_struct embedded in GPU command queue group data.
+ * @work: Pointer to a kthread_work struct embedded in GPU command queue group data.
*
* Request to switch to protected mode.
*/
-static void protm_event_worker(struct work_struct *data)
+static void protm_event_worker(struct kthread_work *work)
{
struct kbase_queue_group *const group =
- container_of(data, struct kbase_queue_group, protm_event_work);
+ container_of(work, struct kbase_queue_group, protm_event_work);
struct kbase_protected_suspend_buffer *sbuf = &group->protected_suspend_buf;
int err = 0;
@@ -2325,7 +2335,7 @@ static void protm_event_worker(struct work_struct *data)
} else if (err == -ENOMEM && sbuf->alloc_retries <= PROTM_ALLOC_MAX_RETRIES) {
sbuf->alloc_retries++;
/* try again to allocate pages */
- queue_work(group->kctx->csf.wq, &group->protm_event_work);
+ kthread_queue_work(&group->kctx->csf.protm_event_worker, &group->protm_event_work);
} else if (sbuf->alloc_retries >= PROTM_ALLOC_MAX_RETRIES || err != -ENOMEM) {
dev_err(group->kctx->kbdev->dev,
"Failed to allocate physical pages for Protected mode suspend buffer for the group %d of context %d_%d",
@@ -2707,7 +2717,8 @@ static void process_cs_interrupts(struct kbase_queue_group *const group,
}
if (!group->protected_suspend_buf.pma)
- queue_work(group->kctx->csf.wq, &group->protm_event_work);
+ kthread_queue_work(&group->kctx->csf.protm_event_worker,
+ &group->protm_event_work);
if (test_bit(group->csg_nr, scheduler->csg_slots_idle_mask)) {
clear_bit(group->csg_nr,
@@ -3065,7 +3076,8 @@ static inline void process_tracked_info_for_protm(struct kbase_device *kbdev,
if (!tock_triggered) {
dev_dbg(kbdev->dev, "Group-%d on slot-%d start protm work\n",
group->handle, group->csg_nr);
- queue_work(group->kctx->csf.wq, &group->protm_event_work);
+ kthread_queue_work(&group->kctx->csf.protm_event_worker,
+ &group->protm_event_work);
}
}
}
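
Note that kbase_create_realtime_thread() and kthread_worker_fn() appear above without their definitions: kthread_worker_fn() is the upstream worker loop from <linux/kthread.h>, while kbase_create_realtime_thread() is a Mali driver helper defined outside this diff. Judging only from its call sites, it spawns a named kthread running the given function at realtime priority. A speculative sketch under that assumption (the real helper may differ, e.g. in priority or CPU-affinity policy):

/*
 * Speculative sketch of a realtime worker-thread helper matching the
 * call sites in this patch. The real kbase_create_realtime_thread()
 * is defined elsewhere in the driver and may differ.
 */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_create_realtime_thread(struct kbase_device *kbdev,
					  int (*threadfn)(void *data),
					  struct kthread_worker *worker,
					  const char *name)
{
	struct task_struct *task;

	kthread_init_worker(worker);

	task = kthread_create(threadfn, worker, "%s", name);
	if (IS_ERR(task))
		return PTR_ERR(task);

	/* Assumed: elevate to SCHED_FIFO (sched_set_fifo(), v5.9+) so the
	 * worker is not starved by normal-priority tasks; the real helper
	 * may instead derive priority or affinity from kbdev. */
	sched_set_fifo(task);

	wake_up_process(task);
	return 0;
}
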
diff --git a/mali_kbase/csf/mali_kbase_csf_defs.h b/mali_kbase/csf/mali_kbase_csf_defs.h
index 3bc3623..cb4e5eb 100644
--- a/mali_kbase/csf/mali_kbase_csf_defs.h
+++ b/mali_kbase/csf/mali_kbase_csf_defs.h
@@ -562,7 +562,7 @@ struct kbase_queue_group {
struct kbase_queue *bound_queues[MAX_SUPPORTED_STREAMS_PER_GROUP];
int doorbell_nr;
- struct work_struct protm_event_work;
+ struct kthread_work protm_event_work;
DECLARE_BITMAP(protm_pending_bitmap, MAX_SUPPORTED_STREAMS_PER_GROUP);
struct kbase_csf_notification error_fatal;
@@ -836,6 +836,7 @@ struct kbase_csf_user_reg_context {
* @sched: Object representing the scheduler's context
* @pending_submission_worker: Worker for the pending submission work item
* @pending_submission_work: Work item to process pending kicked GPU command queues.
+ * @protm_event_worker: Worker to process requests to enter protected mode.
* @cpu_queue: CPU queue information. Only be available when DEBUG_FS
* is enabled.
* @user_reg: Collective information to support mapping to USER Register page.
@@ -856,6 +857,7 @@ struct kbase_csf_context {
struct kbase_csf_scheduler_context sched;
struct kthread_worker pending_submission_worker;
struct kthread_work pending_submission_work;
+ struct kthread_worker protm_event_worker;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct kbase_csf_cpu_queue_context cpu_queue;
#endif
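
The matching teardown helper, kbase_destroy_kworker_stack(), is also defined outside this diff. For a worker thread created as sketched above, teardown plausibly reduces to flushing outstanding work and stopping the thread, mirroring upstream kthread_destroy_worker(); a hypothetical sketch:

/*
 * Hypothetical teardown counterpart to the creation helper above;
 * the real kbase_destroy_kworker_stack() may differ.
 */
#include <linux/kthread.h>

static void example_destroy_kworker(struct kthread_worker *worker)
{
	/* Wait for queued and in-flight kthread_work to finish. */
	kthread_flush_worker(worker);

	/* kthread_worker_fn() returns once kthread_should_stop() is set. */
	kthread_stop(worker->task);
}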