Diffstat (limited to 'mali_kbase/mali_kbase_jd.c')
-rw-r--r--  mali_kbase/mali_kbase_jd.c  357
1 file changed, 95 insertions(+), 262 deletions(-)
diff --git a/mali_kbase/mali_kbase_jd.c b/mali_kbase/mali_kbase_jd.c
index 97add10..15e30db 100644
--- a/mali_kbase/mali_kbase_jd.c
+++ b/mali_kbase/mali_kbase_jd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
- * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -28,6 +28,11 @@
#include <linux/version.h>
#include <linux/ratelimit.h>
#include <linux/priority_control_manager.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/signal.h>
+#else
+#include <linux/signal.h>
+#endif
#include <mali_kbase_jm.h>
#include <mali_kbase_kinstr_jm.h>
@@ -35,7 +40,6 @@
#include <tl/mali_kbase_tracepoints.h>
#include <mali_linux_trace.h>
-#include "mali_kbase_dma_fence.h"
#include <mali_kbase_cs_experimental.h>
#include <mali_kbase_caps.h>
@@ -82,7 +86,7 @@ static void jd_mark_atom_complete(struct kbase_jd_atom *katom)
* Returns whether the JS needs a reschedule.
*
* Note that the caller must also check the atom status and
- * if it is KBASE_JD_ATOM_STATE_COMPLETED must call jd_done_nolock
+ * if it is KBASE_JD_ATOM_STATE_COMPLETED must call kbase_jd_done_nolock
*/
static bool jd_run_atom(struct kbase_jd_atom *katom)
{
@@ -148,7 +152,7 @@ void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom)
if (katom->status == KBASE_JD_ATOM_STATE_COMPLETED) {
/* The atom has already finished */
- resched |= jd_done_nolock(katom, true);
+ resched |= kbase_jd_done_nolock(katom, true);
}
if (resched)
@@ -158,15 +162,6 @@ void kbase_jd_dep_clear_locked(struct kbase_jd_atom *katom)
void kbase_jd_free_external_resources(struct kbase_jd_atom *katom)
{
-#ifdef CONFIG_MALI_DMA_FENCE
- /* Flush dma-fence workqueue to ensure that any callbacks that may have
- * been queued are done before continuing.
- * Any successfully completed atom would have had all it's callbacks
- * completed before the atom was run, so only flush for failed atoms.
- */
- if (katom->event_code != BASE_JD_EVENT_DONE)
- flush_workqueue(katom->kctx->dma_fence.wq);
-#endif /* CONFIG_MALI_DMA_FENCE */
}
static void kbase_jd_post_external_resources(struct kbase_jd_atom *katom)
@@ -174,10 +169,6 @@ static void kbase_jd_post_external_resources(struct kbase_jd_atom *katom)
KBASE_DEBUG_ASSERT(katom);
KBASE_DEBUG_ASSERT(katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES);
-#ifdef CONFIG_MALI_DMA_FENCE
- kbase_dma_fence_signal(katom);
-#endif /* CONFIG_MALI_DMA_FENCE */
-
kbase_gpu_vm_lock(katom->kctx);
/* only roll back if extres is non-NULL */
if (katom->extres) {
@@ -185,13 +176,7 @@ static void kbase_jd_post_external_resources(struct kbase_jd_atom *katom)
res_no = katom->nr_extres;
while (res_no-- > 0) {
- struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
- struct kbase_va_region *reg;
-
- reg = kbase_region_tracker_find_region_base_address(
- katom->kctx,
- katom->extres[res_no].gpu_address);
- kbase_unmap_external_resource(katom->kctx, reg, alloc);
+ kbase_unmap_external_resource(katom->kctx, katom->extres[res_no]);
}
kfree(katom->extres);
katom->extres = NULL;
@@ -207,26 +192,8 @@ static void kbase_jd_post_external_resources(struct kbase_jd_atom *katom)
static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const struct base_jd_atom *user_atom)
{
- int err_ret_val = -EINVAL;
+ int err = -EINVAL;
u32 res_no;
-#ifdef CONFIG_MALI_DMA_FENCE
- struct kbase_dma_fence_resv_info info = {
- .resv_objs = NULL,
- .dma_fence_resv_count = 0,
- .dma_fence_excl_bitmap = NULL
- };
-#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
- /*
- * When both dma-buf fence and Android native sync is enabled, we
- * disable dma-buf fence for contexts that are using Android native
- * fences.
- */
- const bool implicit_sync = !kbase_ctx_flag(katom->kctx,
- KCTX_NO_IMPLICIT_SYNC);
-#else /* CONFIG_SYNC || CONFIG_SYNC_FILE*/
- const bool implicit_sync = true;
-#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
-#endif /* CONFIG_MALI_DMA_FENCE */
struct base_external_resource *input_extres;
KBASE_DEBUG_ASSERT(katom);
@@ -240,68 +207,32 @@ static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const st
if (!katom->extres)
return -ENOMEM;
- /* copy user buffer to the end of our real buffer.
- * Make sure the struct sizes haven't changed in a way
- * we don't support
- */
- BUILD_BUG_ON(sizeof(*input_extres) > sizeof(*katom->extres));
- input_extres = (struct base_external_resource *)
- (((unsigned char *)katom->extres) +
- (sizeof(*katom->extres) - sizeof(*input_extres)) *
- katom->nr_extres);
+ input_extres = kmalloc_array(katom->nr_extres, sizeof(*input_extres), GFP_KERNEL);
+ if (!input_extres) {
+ err = -ENOMEM;
+ goto failed_input_alloc;
+ }
if (copy_from_user(input_extres,
get_compat_pointer(katom->kctx, user_atom->extres_list),
sizeof(*input_extres) * katom->nr_extres) != 0) {
- err_ret_val = -EINVAL;
- goto early_err_out;
+ err = -EINVAL;
+ goto failed_input_copy;
}
-#ifdef CONFIG_MALI_DMA_FENCE
- if (implicit_sync) {
- info.resv_objs =
- kmalloc_array(katom->nr_extres,
-#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
- sizeof(struct reservation_object *),
-#else
- sizeof(struct dma_resv *),
-#endif
- GFP_KERNEL);
- if (!info.resv_objs) {
- err_ret_val = -ENOMEM;
- goto early_err_out;
- }
-
- info.dma_fence_excl_bitmap =
- kcalloc(BITS_TO_LONGS(katom->nr_extres),
- sizeof(unsigned long), GFP_KERNEL);
- if (!info.dma_fence_excl_bitmap) {
- err_ret_val = -ENOMEM;
- goto early_err_out;
- }
- }
-#endif /* CONFIG_MALI_DMA_FENCE */
-
/* Take the processes mmap lock */
down_read(kbase_mem_get_process_mmap_lock());
/* need to keep the GPU VM locked while we set up UMM buffers */
kbase_gpu_vm_lock(katom->kctx);
for (res_no = 0; res_no < katom->nr_extres; res_no++) {
- struct base_external_resource *res = &input_extres[res_no];
+ struct base_external_resource *user_res = &input_extres[res_no];
struct kbase_va_region *reg;
- struct kbase_mem_phy_alloc *alloc;
-#ifdef CONFIG_MALI_DMA_FENCE
- bool exclusive;
- exclusive = (res->ext_resource & BASE_EXT_RES_ACCESS_EXCLUSIVE)
- ? true : false;
-#endif
reg = kbase_region_tracker_find_region_enclosing_address(
- katom->kctx,
- res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
+ katom->kctx, user_res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
/* did we find a matching region object? */
- if (kbase_is_region_invalid_or_free(reg)) {
+ if (unlikely(kbase_is_region_invalid_or_free(reg))) {
/* roll back */
goto failed_loop;
}
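
The hunk above replaces the old trick of copying the user records into the tail of katom->extres with a separate temporary allocation that is copied from userspace and then validated entry by entry. A minimal sketch of that pattern, under the assumption that the count has already been bounds-checked; struct demo_record and demo_copy_records() are invented names, not kbase types:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_record {            /* hypothetical stand-in for the real record */
	u64 handle;
};

static int demo_copy_records(const void __user *uptr, u32 count,
			     struct demo_record **out)
{
	struct demo_record *buf;

	/* count is assumed to have been bounds-checked by the caller;
	 * kmalloc_array() additionally guards the size multiplication
	 */
	buf = kmalloc_array(count, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, uptr, count * sizeof(*buf))) {
		kfree(buf);
		return -EFAULT;
	}

	*out = buf;
	return 0;
}
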
@@ -311,36 +242,11 @@ static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const st
katom->atom_flags |= KBASE_KATOM_FLAG_PROTECTED;
}
- alloc = kbase_map_external_resource(katom->kctx, reg,
- current->mm);
- if (!alloc) {
- err_ret_val = -EINVAL;
+ err = kbase_map_external_resource(katom->kctx, reg, current->mm);
+ if (err)
goto failed_loop;
- }
-
-#ifdef CONFIG_MALI_DMA_FENCE
- if (implicit_sync &&
- reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
-#if (KERNEL_VERSION(5, 4, 0) > LINUX_VERSION_CODE)
- struct reservation_object *resv;
-#else
- struct dma_resv *resv;
-#endif
- resv = reg->gpu_alloc->imported.umm.dma_buf->resv;
- if (resv)
- kbase_dma_fence_add_reservation(resv, &info,
- exclusive);
- }
-#endif /* CONFIG_MALI_DMA_FENCE */
- /* finish with updating out array with the data we found */
- /* NOTE: It is important that this is the last thing we do (or
- * at least not before the first write) as we overwrite elements
- * as we loop and could be overwriting ourself, so no writes
- * until the last read for an element.
- */
- katom->extres[res_no].gpu_address = reg->start_pfn << PAGE_SHIFT; /* save the start_pfn (as an address, not pfn) to use fast lookup later */
- katom->extres[res_no].alloc = alloc;
+ katom->extres[res_no] = reg;
}
/* successfully parsed the extres array */
/* drop the vm lock now */
@@ -349,57 +255,33 @@ static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const st
/* Release the processes mmap lock */
up_read(kbase_mem_get_process_mmap_lock());
-#ifdef CONFIG_MALI_DMA_FENCE
- if (implicit_sync) {
- if (info.dma_fence_resv_count) {
- int ret;
-
- ret = kbase_dma_fence_wait(katom, &info);
- if (ret < 0)
- goto failed_dma_fence_setup;
- }
-
- kfree(info.resv_objs);
- kfree(info.dma_fence_excl_bitmap);
- }
-#endif /* CONFIG_MALI_DMA_FENCE */
+ /* Free the buffer holding data from userspace */
+ kfree(input_extres);
/* all done OK */
return 0;
/* error handling section */
-
-#ifdef CONFIG_MALI_DMA_FENCE
-failed_dma_fence_setup:
- /* Lock the processes mmap lock */
- down_read(kbase_mem_get_process_mmap_lock());
-
- /* lock before we unmap */
- kbase_gpu_vm_lock(katom->kctx);
-#endif
-
- failed_loop:
- /* undo the loop work */
+failed_loop:
+ /* undo the loop work. We are guaranteed to have access to the VA region
+ * as we hold a reference to it until it's unmapped
+ */
while (res_no-- > 0) {
- struct kbase_mem_phy_alloc *alloc = katom->extres[res_no].alloc;
+ struct kbase_va_region *reg = katom->extres[res_no];
- kbase_unmap_external_resource(katom->kctx, NULL, alloc);
+ kbase_unmap_external_resource(katom->kctx, reg);
}
kbase_gpu_vm_unlock(katom->kctx);
/* Release the processes mmap lock */
up_read(kbase_mem_get_process_mmap_lock());
- early_err_out:
+failed_input_copy:
+ kfree(input_extres);
+failed_input_alloc:
kfree(katom->extres);
katom->extres = NULL;
-#ifdef CONFIG_MALI_DMA_FENCE
- if (implicit_sync) {
- kfree(info.resv_objs);
- kfree(info.dma_fence_excl_bitmap);
- }
-#endif
- return err_ret_val;
+ return err;
}
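
The error path above keeps the familiar reverse-unwind shape: only the entries that were mapped before the failure are unmapped, newest first, before control falls through to the later labels. A stripped-down sketch of the idiom; demo_map() and demo_unmap() are invented placeholders, not kbase APIs:

#include <linux/errno.h>
#include <linux/types.h>

void *demo_map(u32 idx);        /* hypothetical */
void demo_unmap(void *obj);     /* hypothetical */

static int demo_map_all(void **objs, u32 count)
{
	u32 n;
	int err;

	for (n = 0; n < count; n++) {
		objs[n] = demo_map(n);
		if (!objs[n]) {
			err = -EINVAL;
			goto failed_loop;
		}
	}
	return 0;

failed_loop:
	/* undo only the entries set up before the failure, newest first */
	while (n-- > 0)
		demo_unmap(objs[n]);
	return err;
}
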
static inline void jd_resolve_dep(struct list_head *out_list,
@@ -422,10 +304,6 @@ static inline void jd_resolve_dep(struct list_head *out_list,
if (katom->event_code != BASE_JD_EVENT_DONE &&
(dep_type != BASE_JD_DEP_TYPE_ORDER)) {
-#ifdef CONFIG_MALI_DMA_FENCE
- kbase_dma_fence_cancel_callbacks(dep_atom);
-#endif
-
dep_atom->event_code = katom->event_code;
KBASE_DEBUG_ASSERT(dep_atom->status !=
KBASE_JD_ATOM_STATE_UNUSED);
@@ -439,35 +317,8 @@ static inline void jd_resolve_dep(struct list_head *out_list,
(IS_GPU_ATOM(dep_atom) && !ctx_is_dying &&
!dep_atom->will_fail_event_code &&
!other_dep_atom->will_fail_event_code))) {
- bool dep_satisfied = true;
-#ifdef CONFIG_MALI_DMA_FENCE
- int dep_count;
-
- dep_count = kbase_fence_dep_count_read(dep_atom);
- if (likely(dep_count == -1)) {
- dep_satisfied = true;
- } else {
- /*
- * There are either still active callbacks, or
- * all fences for this @dep_atom has signaled,
- * but the worker that will queue the atom has
- * not yet run.
- *
- * Wait for the fences to signal and the fence
- * worker to run and handle @dep_atom. If
- * @dep_atom was completed due to error on
- * @katom, then the fence worker will pick up
- * the complete status and error code set on
- * @dep_atom above.
- */
- dep_satisfied = false;
- }
-#endif /* CONFIG_MALI_DMA_FENCE */
-
- if (dep_satisfied) {
- dep_atom->in_jd_list = true;
- list_add_tail(&dep_atom->jd_item, out_list);
- }
+ dep_atom->in_jd_list = true;
+ list_add_tail(&dep_atom->jd_item, out_list);
}
}
}
@@ -526,33 +377,8 @@ static void jd_try_submitting_deps(struct list_head *out_list,
dep_atom->dep[0].atom);
bool dep1_valid = is_dep_valid(
dep_atom->dep[1].atom);
- bool dep_satisfied = true;
-#ifdef CONFIG_MALI_DMA_FENCE
- int dep_count;
-
- dep_count = kbase_fence_dep_count_read(
- dep_atom);
- if (likely(dep_count == -1)) {
- dep_satisfied = true;
- } else {
- /*
- * There are either still active callbacks, or
- * all fences for this @dep_atom has signaled,
- * but the worker that will queue the atom has
- * not yet run.
- *
- * Wait for the fences to signal and the fence
- * worker to run and handle @dep_atom. If
- * @dep_atom was completed due to error on
- * @katom, then the fence worker will pick up
- * the complete status and error code set on
- * @dep_atom above.
- */
- dep_satisfied = false;
- }
-#endif /* CONFIG_MALI_DMA_FENCE */
- if (dep0_valid && dep1_valid && dep_satisfied) {
+ if (dep0_valid && dep1_valid) {
dep_atom->in_jd_list = true;
list_add(&dep_atom->jd_item, out_list);
}
@@ -780,10 +606,13 @@ static void jd_mark_simple_gfx_frame_atoms(struct kbase_jd_atom *katom)
}
if (dep_fence && dep_vtx) {
+ unsigned long flags;
dev_dbg(kbdev->dev, "Simple gfx frame: {vtx=%pK, wait=%pK}->frag=%pK\n",
dep_vtx, dep_fence, katom);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
katom->atom_flags |= KBASE_KATOM_FLAG_SIMPLE_FRAME_FRAGMENT;
dep_vtx->atom_flags |= KBASE_KATOM_FLAG_DEFER_WHILE_POWEROFF;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
}
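
The atom flag updates above are now made while holding kbdev->hwaccess_lock with interrupts disabled, since the same flags can be inspected from the hardware access paths. A generic sketch of that locking pattern; struct demo_dev and demo_set_flag() are made-up names:

#include <linux/spinlock.h>

struct demo_dev {               /* hypothetical device object */
	spinlock_t hw_lock;
	unsigned long atom_flags;
};

static void demo_set_flag(struct demo_dev *dev, unsigned long bit)
{
	unsigned long flags;

	/* flag words that may also be read from IRQ context are updated
	 * with the lock held and interrupts saved/restored
	 */
	spin_lock_irqsave(&dev->hw_lock, flags);
	dev->atom_flags |= bit;
	spin_unlock_irqrestore(&dev->hw_lock, flags);
}
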
@@ -796,7 +625,7 @@ static void jd_mark_simple_gfx_frame_atoms(struct kbase_jd_atom *katom)
*
* The caller must hold the kbase_jd_context.lock.
*/
-bool jd_done_nolock(struct kbase_jd_atom *katom, bool post_immediately)
+bool kbase_jd_done_nolock(struct kbase_jd_atom *katom, bool post_immediately)
{
struct kbase_context *kctx = katom->kctx;
struct list_head completed_jobs;
@@ -804,6 +633,8 @@ bool jd_done_nolock(struct kbase_jd_atom *katom, bool post_immediately)
bool need_to_try_schedule_context = false;
int i;
+ lockdep_assert_held(&kctx->jctx.lock);
+
KBASE_TLSTREAM_TL_JD_DONE_NO_LOCK_START(kctx->kbdev, katom);
INIT_LIST_HEAD(&completed_jobs);
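
The new lockdep_assert_held() turns the documented precondition ("The caller must hold the kbase_jd_context.lock") into a runtime check on lockdep-enabled builds. A minimal, hypothetical example of the same idea; struct demo_ctx and demo_must_hold_lock() are not kbase names:

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct demo_ctx {               /* hypothetical context */
	struct mutex lock;
};

static void demo_must_hold_lock(struct demo_ctx *ctx)
{
	/* documents and, with CONFIG_LOCKDEP, enforces that the caller
	 * already holds ctx->lock; compiles to almost nothing otherwise
	 */
	lockdep_assert_held(&ctx->lock);

	/* ... work that relies on ctx->lock being held ... */
}
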
@@ -855,14 +686,15 @@ bool jd_done_nolock(struct kbase_jd_atom *katom, bool post_immediately)
dev_dbg(kctx->kbdev->dev,
"Simple-frame fragment atom %pK unblocked\n",
node);
- node->atom_flags &=
- ~KBASE_KATOM_FLAG_SIMPLE_FRAME_FRAGMENT;
for (i = 0; i < 2; i++) {
if (node->dep[i].atom &&
node->dep[i].atom->atom_flags &
KBASE_KATOM_FLAG_DEFER_WHILE_POWEROFF) {
+ unsigned long flags;
+ spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
node->dep[i].atom->atom_flags &=
~KBASE_KATOM_FLAG_DEFER_WHILE_POWEROFF;
+ spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
dev_dbg(kctx->kbdev->dev,
" Undeferred atom %pK\n",
node->dep[i].atom);
@@ -936,7 +768,7 @@ bool jd_done_nolock(struct kbase_jd_atom *katom, bool post_immediately)
return need_to_try_schedule_context;
}
-KBASE_EXPORT_TEST_API(jd_done_nolock);
+KBASE_EXPORT_TEST_API(kbase_jd_done_nolock);
#if IS_ENABLED(CONFIG_GPU_TRACEPOINTS)
enum {
@@ -1044,7 +876,6 @@ static bool jd_submit_atom(struct kbase_context *const kctx,
katom->jobslot = user_atom->jobslot;
katom->seq_nr = user_atom->seq_nr;
katom->atom_flags = 0;
- katom->retry_count = 0;
katom->need_cache_flush_cores_retained = 0;
katom->pre_dep = NULL;
katom->post_dep = NULL;
@@ -1078,9 +909,6 @@ static bool jd_submit_atom(struct kbase_context *const kctx,
INIT_LIST_HEAD(&katom->queue);
INIT_LIST_HEAD(&katom->jd_item);
-#ifdef CONFIG_MALI_DMA_FENCE
- kbase_fence_dep_count_set(katom, -1);
-#endif
/* Don't do anything if there is a mess up with dependencies.
* This is done in a separate cycle to check both the dependencies at ones, otherwise
@@ -1105,7 +933,7 @@ static bool jd_submit_atom(struct kbase_context *const kctx,
* dependencies.
*/
jd_trace_atom_submit(kctx, katom, NULL);
- return jd_done_nolock(katom, true);
+ return kbase_jd_done_nolock(katom, true);
}
}
}
@@ -1169,7 +997,7 @@ static bool jd_submit_atom(struct kbase_context *const kctx,
if (err >= 0)
kbase_finish_soft_job(katom);
}
- return jd_done_nolock(katom, true);
+ return kbase_jd_done_nolock(katom, true);
}
katom->will_fail_event_code = katom->event_code;
@@ -1195,7 +1023,7 @@ static bool jd_submit_atom(struct kbase_context *const kctx,
/* Create a new atom. */
jd_trace_atom_submit(kctx, katom, &katom->sched_priority);
-#if !MALI_INCREMENTAL_RENDERING
+#if !MALI_INCREMENTAL_RENDERING_JM
/* Reject atoms for incremental rendering if not supported */
if (katom->core_req &
(BASE_JD_REQ_START_RENDERPASS|BASE_JD_REQ_END_RENDERPASS)) {
@@ -1203,9 +1031,9 @@ static bool jd_submit_atom(struct kbase_context *const kctx,
"Rejecting atom with unsupported core_req 0x%x\n",
katom->core_req);
katom->event_code = BASE_JD_EVENT_JOB_INVALID;
- return jd_done_nolock(katom, true);
+ return kbase_jd_done_nolock(katom, true);
}
-#endif /* !MALI_INCREMENTAL_RENDERING */
+#endif /* !MALI_INCREMENTAL_RENDERING_JM */
if (katom->core_req & BASE_JD_REQ_END_RENDERPASS) {
WARN_ON(katom->jc != 0);
@@ -1217,7 +1045,7 @@ static bool jd_submit_atom(struct kbase_context *const kctx,
*/
dev_err(kctx->kbdev->dev, "Rejecting atom with jc = NULL\n");
katom->event_code = BASE_JD_EVENT_JOB_INVALID;
- return jd_done_nolock(katom, true);
+ return kbase_jd_done_nolock(katom, true);
}
/* Reject atoms with an invalid device_nr */
@@ -1227,7 +1055,7 @@ static bool jd_submit_atom(struct kbase_context *const kctx,
"Rejecting atom with invalid device_nr %d\n",
katom->device_nr);
katom->event_code = BASE_JD_EVENT_JOB_INVALID;
- return jd_done_nolock(katom, true);
+ return kbase_jd_done_nolock(katom, true);
}
/* Reject atoms with invalid core requirements */
@@ -1237,7 +1065,7 @@ static bool jd_submit_atom(struct kbase_context *const kctx,
"Rejecting atom with invalid core requirements\n");
katom->event_code = BASE_JD_EVENT_JOB_INVALID;
katom->core_req &= ~BASE_JD_REQ_EVENT_COALESCE;
- return jd_done_nolock(katom, true);
+ return kbase_jd_done_nolock(katom, true);
}
/* Reject soft-job atom of certain types from accessing external resources */
@@ -1248,7 +1076,7 @@ static bool jd_submit_atom(struct kbase_context *const kctx,
dev_err(kctx->kbdev->dev,
"Rejecting soft-job atom accessing external resources\n");
katom->event_code = BASE_JD_EVENT_JOB_INVALID;
- return jd_done_nolock(katom, true);
+ return kbase_jd_done_nolock(katom, true);
}
if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
@@ -1256,7 +1084,7 @@ static bool jd_submit_atom(struct kbase_context *const kctx,
if (kbase_jd_pre_external_resources(katom, user_atom) != 0) {
/* setup failed (no access, bad resource, unknown resource types, etc.) */
katom->event_code = BASE_JD_EVENT_JOB_INVALID;
- return jd_done_nolock(katom, true);
+ return kbase_jd_done_nolock(katom, true);
}
}
@@ -1267,7 +1095,7 @@ static bool jd_submit_atom(struct kbase_context *const kctx,
* JIT IDs - atom is invalid.
*/
katom->event_code = BASE_JD_EVENT_JOB_INVALID;
- return jd_done_nolock(katom, true);
+ return kbase_jd_done_nolock(katom, true);
}
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
@@ -1281,13 +1109,13 @@ static bool jd_submit_atom(struct kbase_context *const kctx,
if ((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0) {
if (!kbase_js_is_atom_valid(kctx->kbdev, katom)) {
katom->event_code = BASE_JD_EVENT_JOB_INVALID;
- return jd_done_nolock(katom, true);
+ return kbase_jd_done_nolock(katom, true);
}
} else {
/* Soft-job */
if (kbase_prepare_soft_job(katom) != 0) {
katom->event_code = BASE_JD_EVENT_JOB_INVALID;
- return jd_done_nolock(katom, true);
+ return kbase_jd_done_nolock(katom, true);
}
}
@@ -1302,16 +1130,10 @@ static bool jd_submit_atom(struct kbase_context *const kctx,
if (queued && !IS_GPU_ATOM(katom))
return false;
-#ifdef CONFIG_MALI_DMA_FENCE
- if (kbase_fence_dep_count_read(katom) != -1)
- return false;
-
-#endif /* CONFIG_MALI_DMA_FENCE */
-
if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
if (kbase_process_soft_job(katom) == 0) {
kbase_finish_soft_job(katom);
- return jd_done_nolock(katom, true);
+ return kbase_jd_done_nolock(katom, true);
}
return false;
}
@@ -1341,7 +1163,7 @@ static bool jd_submit_atom(struct kbase_context *const kctx,
}
/* This is a pure dependency. Resolve it immediately */
- return jd_done_nolock(katom, true);
+ return kbase_jd_done_nolock(katom, true);
}
int kbase_jd_submit(struct kbase_context *kctx,
@@ -1379,18 +1201,26 @@ int kbase_jd_submit(struct kbase_context *kctx,
return -EINVAL;
}
+ if (nr_atoms > BASE_JD_ATOM_COUNT) {
+ dev_dbg(kbdev->dev, "Invalid attempt to submit %u atoms at once for kctx %d_%d",
+ nr_atoms, kctx->tgid, kctx->id);
+ return -EINVAL;
+ }
+
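
The added check rejects oversized atom counts before anything is allocated or iterated. The same guard in isolation, with an invented limit DEMO_MAX_ITEMS standing in for BASE_JD_ATOM_COUNT:

#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_MAX_ITEMS 256U     /* invented limit, stands in for BASE_JD_ATOM_COUNT */

static int demo_check_count(u32 nr_items)
{
	/* refuse user-supplied counts above the fixed maximum up front */
	if (nr_items > DEMO_MAX_ITEMS)
		return -EINVAL;
	return 0;
}
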
/* All atoms submitted in this call have the same flush ID */
latest_flush = kbase_backend_get_current_flush_id(kbdev);
for (i = 0; i < nr_atoms; i++) {
- struct base_jd_atom user_atom;
+ struct base_jd_atom user_atom = {
+ .seq_nr = 0,
+ };
struct base_jd_fragment user_jc_incr;
struct kbase_jd_atom *katom;
if (unlikely(jd_atom_is_v2)) {
if (copy_from_user(&user_atom.jc, user_addr, sizeof(struct base_jd_atom_v2)) != 0) {
dev_dbg(kbdev->dev,
- "Invalid atom address %p passed to job_submit\n",
+ "Invalid atom address %pK passed to job_submit\n",
user_addr);
err = -EFAULT;
break;
@@ -1401,7 +1231,7 @@ int kbase_jd_submit(struct kbase_context *kctx,
} else {
if (copy_from_user(&user_atom, user_addr, stride) != 0) {
dev_dbg(kbdev->dev,
- "Invalid atom address %p passed to job_submit\n",
+ "Invalid atom address %pK passed to job_submit\n",
user_addr);
err = -EFAULT;
break;
@@ -1507,6 +1337,12 @@ while (false)
kbase_disjoint_event_potential(kbdev);
rt_mutex_unlock(&jctx->lock);
+ if (fatal_signal_pending(current)) {
+ dev_dbg(kbdev->dev, "Fatal signal pending for kctx %d_%d",
+ kctx->tgid, kctx->id);
+ /* We're being killed so the result code doesn't really matter */
+ return 0;
+ }
}
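
The early return added above checks for a fatal signal after each atom so that a process being killed does not keep looping through its submission list. A self-contained sketch of the pattern; demo_submit_one() and demo_submit_all() are hypothetical:

#include <linux/sched/signal.h>
#include <linux/types.h>

void demo_submit_one(u32 idx);  /* hypothetical */

static int demo_submit_all(u32 count)
{
	u32 i;

	for (i = 0; i < count; i++) {
		demo_submit_one(i);

		/* the process is being killed: stop early, the result
		 * code no longer matters to anyone
		 */
		if (fatal_signal_pending(current))
			return 0;
	}
	return 0;
}
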
if (need_to_try_schedule_context)
@@ -1598,8 +1434,8 @@ void kbase_jd_done_worker(struct kthread_work *data)
kbasep_js_remove_job(kbdev, kctx, katom);
rt_mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
rt_mutex_unlock(&js_devdata->queue_mutex);
- /* jd_done_nolock() requires the jsctx_mutex lock to be dropped */
- jd_done_nolock(katom, false);
+ /* kbase_jd_done_nolock() requires the jsctx_mutex lock to be dropped */
+ kbase_jd_done_nolock(katom, false);
/* katom may have been freed now, do not use! */
@@ -1665,7 +1501,7 @@ void kbase_jd_done_worker(struct kthread_work *data)
kbase_js_sched_all(kbdev);
if (!atomic_dec_return(&kctx->work_count)) {
- /* If worker now idle then post all events that jd_done_nolock()
+ /* If worker now idle then post all events that kbase_jd_done_nolock()
* has queued
*/
rt_mutex_lock(&jctx->lock);
@@ -1711,8 +1547,10 @@ static void jd_cancel_worker(struct kthread_work *data)
struct kbase_jd_context *jctx;
struct kbase_context *kctx;
struct kbasep_js_kctx_info *js_kctx_info;
+ bool need_to_try_schedule_context;
bool attr_state_changed;
struct kbase_device *kbdev;
+ CSTD_UNUSED(need_to_try_schedule_context);
/* Soft jobs should never reach this function */
KBASE_DEBUG_ASSERT((katom->core_req & BASE_JD_REQ_SOFT_JOB) == 0);
@@ -1738,7 +1576,13 @@ static void jd_cancel_worker(struct kthread_work *data)
rt_mutex_lock(&jctx->lock);
- jd_done_nolock(katom, true);
+ need_to_try_schedule_context = kbase_jd_done_nolock(katom, true);
+ /* Because we're zapping, we're not adding any more jobs to this ctx, so no need to
+ * schedule the context. There's also no need for the jsctx_mutex to have been taken
+ * around this too.
+ */
+ KBASE_DEBUG_ASSERT(!need_to_try_schedule_context);
+ CSTD_UNUSED(need_to_try_schedule_context);
/* katom may have been freed now, do not use! */
rt_mutex_unlock(&jctx->lock);
@@ -1777,6 +1621,8 @@ void kbase_jd_done(struct kbase_jd_atom *katom, int slot_nr,
kbdev = kctx->kbdev;
KBASE_DEBUG_ASSERT(kbdev);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT)
katom->event_code = BASE_JD_EVENT_REMOVED_FROM_NEXT;
@@ -1854,20 +1700,8 @@ void kbase_jd_zap_context(struct kbase_context *kctx)
kbase_cancel_soft_job(katom);
}
-
-#ifdef CONFIG_MALI_DMA_FENCE
- kbase_dma_fence_cancel_all_atoms(kctx);
-#endif
-
rt_mutex_unlock(&kctx->jctx.lock);
-#ifdef CONFIG_MALI_DMA_FENCE
- /* Flush dma-fence workqueue to ensure that any callbacks that may have
- * been queued are done before continuing.
- */
- flush_workqueue(kctx->dma_fence.wq);
-#endif
-
#if IS_ENABLED(CONFIG_DEBUG_FS)
kbase_debug_job_fault_kctx_unblock(kctx);
#endif
@@ -1896,11 +1730,10 @@ int kbase_jd_init(struct kbase_context *kctx)
kctx->jctx.atoms[i].event_code = BASE_JD_EVENT_JOB_INVALID;
kctx->jctx.atoms[i].status = KBASE_JD_ATOM_STATE_UNUSED;
-#if defined(CONFIG_MALI_DMA_FENCE) || defined(CONFIG_SYNC_FILE)
+#if IS_ENABLED(CONFIG_SYNC_FILE)
kctx->jctx.atoms[i].dma_fence.context =
dma_fence_context_alloc(1);
atomic_set(&kctx->jctx.atoms[i].dma_fence.seqno, 0);
- INIT_LIST_HEAD(&kctx->jctx.atoms[i].dma_fence.callbacks);
#endif
}
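
The per-atom fence state above (a fence context plus a seqno counter) is now initialised only when CONFIG_SYNC_FILE is enabled. A generic sketch of reserving a dma-fence context for a producer object; struct demo_producer is an invented name:

#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/types.h>

struct demo_producer {          /* hypothetical fence producer */
	u64 fence_context;
	atomic_t fence_seqno;
};

static void demo_producer_init(struct demo_producer *p)
{
	/* reserve one fence context; fences later emitted by this producer
	 * share the context and take increasing seqno values
	 */
	p->fence_context = dma_fence_context_alloc(1);
	atomic_set(&p->fence_seqno, 0);
}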