author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2023-08-11 19:44:38 +0000
committer  Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2023-08-11 19:44:38 +0000
commit     fb5aa0cebc6104899383bf0b45a72f2e112d7381 (patch)
tree       ce53ef0fc291fea44cc7583f2d63d54592314b60
parent     296ac8b24393d95216428aff724eba33e53e7163 (diff)
parent     d4438cd093ebaaba076e4522f8e80349104ee9f4 (diff)
download   gpu-android-gs-tangorpro-5.10-u-beta5.2.tar.gz

SPARSE_CHANGE: I37407809d7187cb5c8fac63e6c10f72cc3bd762d

Change-Id: If155b5cb1c4aba3861ed8b5c0ee3667709201b03
-rw-r--r--  mali_kbase/context/mali_kbase_context.c   20
-rw-r--r--  mali_kbase/mali_kbase_defs.h                9
-rw-r--r--  mali_kbase/mali_kbase_mem.h                11
-rw-r--r--  mali_kbase/mali_kbase_mem_linux.c          64
4 files changed, 30 insertions(+), 74 deletions(-)
diff --git a/mali_kbase/context/mali_kbase_context.c b/mali_kbase/context/mali_kbase_context.c
index 8787a56..7dece16 100644
--- a/mali_kbase/context/mali_kbase_context.c
+++ b/mali_kbase/context/mali_kbase_context.c
@@ -182,7 +182,6 @@ int kbase_context_common_init(struct kbase_context *kctx)
/* creating a context is considered a disjoint event */
kbase_disjoint_event(kctx->kbdev);
- spin_lock_init(&kctx->mm_update_lock);
kctx->process_mm = NULL;
kctx->task = NULL;
atomic_set(&kctx->nonmapped_pages, 0);
@@ -223,6 +222,13 @@ int kbase_context_common_init(struct kbase_context *kctx)
if (unlikely(err))
return err;
+
+ /* This merely takes a reference on the mm_struct and not on the
+ * address space and so won't block the freeing of address space
+ * on process exit.
+ */
+ mmgrab(current->mm);
+ kctx->process_mm = current->mm;
}
atomic_set(&kctx->used_pages, 0);
@@ -256,14 +262,18 @@ int kbase_context_common_init(struct kbase_context *kctx)
mutex_lock(&kctx->kbdev->kctx_list_lock);
err = kbase_insert_kctx_to_process(kctx);
- mutex_unlock(&kctx->kbdev->kctx_list_lock);
+
if (err) {
dev_err(kctx->kbdev->dev,
"(err:%d) failed to insert kctx to kbase_process", err);
- if (likely(kctx->filp))
+ if (likely(kctx->filp)) {
put_task_struct(kctx->task);
+ mmdrop(kctx->process_mm);
+ }
}
+ mutex_unlock(&kctx->kbdev->kctx_list_lock);
+
return err;
}
@@ -350,8 +360,10 @@ void kbase_context_common_term(struct kbase_context *kctx)
kbase_remove_kctx_from_process(kctx);
mutex_unlock(&kctx->kbdev->kctx_list_lock);
- if (likely(kctx->filp))
+ if (likely(kctx->filp)) {
put_task_struct(kctx->task);
+ mmdrop(kctx->process_mm);
+ }
KBASE_KTRACE_ADD(kctx->kbdev, CORE_CTX_DESTROY, kctx, 0u);
}
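
For reference, a minimal sketch of the mm_struct lifecycle that the hunks above put in place, with the kbase specifics stripped out; demo_ctx, demo_ctx_init() and demo_ctx_term() are illustrative names only, not part of the driver. mmgrab()/mmdrop() pin only the mm_struct descriptor, not the address space behind it, so the creating process can still exit and tear down its mappings normally while the context holds the reference.

#include <linux/sched.h>
#include <linux/sched/mm.h>

struct demo_ctx {
	struct mm_struct *process_mm;	/* mm of the creating process */
};

static void demo_ctx_init(struct demo_ctx *ctx)
{
	/* Take a reference on the creating process' mm descriptor so it
	 * stays valid for memory-usage accounting while the context lives.
	 */
	mmgrab(current->mm);
	ctx->process_mm = current->mm;
}

static void demo_ctx_term(struct demo_ctx *ctx)
{
	/* Drop the reference taken in demo_ctx_init(). */
	mmdrop(ctx->process_mm);
	ctx->process_mm = NULL;
}
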
diff --git a/mali_kbase/mali_kbase_defs.h b/mali_kbase/mali_kbase_defs.h
index 347f15c..02fbb7f 100644
--- a/mali_kbase/mali_kbase_defs.h
+++ b/mali_kbase/mali_kbase_defs.h
@@ -1644,11 +1644,13 @@ struct kbase_sub_alloc {
* is scheduled in and an atom is pulled from the context's per
* slot runnable tree in JM GPU or GPU command queue
* group is programmed on CSG slot in CSF GPU.
- * @mm_update_lock: lock used for handling of special tracking page.
* @process_mm: Pointer to the memory descriptor of the process which
* created the context. Used for accounting the physical
* pages used for GPU allocations, done for the context,
- * to the memory consumed by the process.
+ * to the memory consumed by the process. A reference is taken
+ * on this descriptor for the Userspace created contexts so that
+ * Kbase can safely access it to update the memory usage counters.
+ * The reference is dropped on context termination.
* @gpu_va_end: End address of the GPU va space (in 4KB page units)
* @running_total_tiler_heap_nr_chunks: Running total of number of chunks in all
* tiler heaps of the kbase context.
@@ -1873,8 +1875,7 @@ struct kbase_context {
atomic_t refcount;
- spinlock_t mm_update_lock;
- struct mm_struct __rcu *process_mm;
+ struct mm_struct *process_mm;
u64 gpu_va_end;
#if MALI_USE_CSF
u32 running_total_tiler_heap_nr_chunks;
diff --git a/mali_kbase/mali_kbase_mem.h b/mali_kbase/mali_kbase_mem.h
index 9f8be10..0e4dcfe 100644
--- a/mali_kbase/mali_kbase_mem.h
+++ b/mali_kbase/mali_kbase_mem.h
@@ -2300,8 +2300,7 @@ kbase_ctx_reg_zone_get(struct kbase_context *kctx, unsigned long zone_bits)
* kbase_mem_allow_alloc - Check if allocation of GPU memory is allowed
* @kctx: Pointer to kbase context
*
- * Don't allow the allocation of GPU memory until user space has set up the
- * tracking page (which sets kctx->process_mm) or if the ioctl has been issued
+ * Don't allow the allocation of GPU memory if the ioctl has been issued
* from the forked child process using the mali device file fd inherited from
* the parent process.
*
@@ -2309,13 +2308,7 @@ kbase_ctx_reg_zone_get(struct kbase_context *kctx, unsigned long zone_bits)
*/
static inline bool kbase_mem_allow_alloc(struct kbase_context *kctx)
{
- bool allow_alloc = true;
-
- rcu_read_lock();
- allow_alloc = (rcu_dereference(kctx->process_mm) == current->mm);
- rcu_read_unlock();
-
- return allow_alloc;
+ return (kctx->process_mm == current->mm);
}
/**
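
A hedged illustration of why the plain pointer comparison above is enough to reject a forked child: fork() gives the child its own mm_struct, so current->mm in the child no longer matches the mm recorded when the context was created, even though the child inherited the device file descriptor. The demo_ctx naming follows the sketch after the mali_kbase_context.c hunks and is illustrative only.

static bool demo_allow_alloc(const struct demo_ctx *ctx)
{
	/* True only for threads of the process that created the context;
	 * a forked child runs on a copied, distinct mm_struct and so
	 * fails this check.
	 */
	return ctx->process_mm == current->mm;
}
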
diff --git a/mali_kbase/mali_kbase_mem_linux.c b/mali_kbase/mali_kbase_mem_linux.c
index 957b5da..ffa0ebc 100644
--- a/mali_kbase/mali_kbase_mem_linux.c
+++ b/mali_kbase/mali_kbase_mem_linux.c
@@ -3211,79 +3211,29 @@ static void kbasep_add_mm_counter(struct mm_struct *mm, int member, long value)
void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages)
{
- struct mm_struct *mm;
+ struct mm_struct *mm = kctx->process_mm;
- rcu_read_lock();
- mm = rcu_dereference(kctx->process_mm);
- if (mm) {
- atomic_add(pages, &kctx->nonmapped_pages);
-#ifdef SPLIT_RSS_COUNTING
- kbasep_add_mm_counter(mm, MM_FILEPAGES, pages);
-#else
- spin_lock(&mm->page_table_lock);
- kbasep_add_mm_counter(mm, MM_FILEPAGES, pages);
- spin_unlock(&mm->page_table_lock);
-#endif
- }
- rcu_read_unlock();
-}
-
-static void kbasep_os_process_page_usage_drain(struct kbase_context *kctx)
-{
- int pages;
- struct mm_struct *mm;
-
- spin_lock(&kctx->mm_update_lock);
- mm = rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock));
- if (!mm) {
- spin_unlock(&kctx->mm_update_lock);
+ if (unlikely(!mm))
return;
- }
- rcu_assign_pointer(kctx->process_mm, NULL);
- spin_unlock(&kctx->mm_update_lock);
- synchronize_rcu();
-
- pages = atomic_xchg(&kctx->nonmapped_pages, 0);
+ atomic_add(pages, &kctx->nonmapped_pages);
#ifdef SPLIT_RSS_COUNTING
- kbasep_add_mm_counter(mm, MM_FILEPAGES, -pages);
+ kbasep_add_mm_counter(mm, MM_FILEPAGES, pages);
#else
spin_lock(&mm->page_table_lock);
- kbasep_add_mm_counter(mm, MM_FILEPAGES, -pages);
+ kbasep_add_mm_counter(mm, MM_FILEPAGES, pages);
spin_unlock(&mm->page_table_lock);
#endif
}
-static void kbase_special_vm_close(struct vm_area_struct *vma)
-{
- struct kbase_context *kctx;
-
- kctx = vma->vm_private_data;
- kbasep_os_process_page_usage_drain(kctx);
-}
-
-static const struct vm_operations_struct kbase_vm_special_ops = {
- .close = kbase_special_vm_close,
-};
-
static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma)
{
- /* check that this is the only tracking page */
- spin_lock(&kctx->mm_update_lock);
- if (rcu_dereference_protected(kctx->process_mm, lockdep_is_held(&kctx->mm_update_lock))) {
- spin_unlock(&kctx->mm_update_lock);
- return -EFAULT;
- }
-
- rcu_assign_pointer(kctx->process_mm, current->mm);
-
- spin_unlock(&kctx->mm_update_lock);
+ if (vma_pages(vma) != 1)
+ return -EINVAL;
/* no real access */
vma->vm_flags &= ~(VM_READ | VM_MAYREAD | VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
- vma->vm_ops = &kbase_vm_special_ops;
- vma->vm_private_data = kctx;
return 0;
}
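
Putting the mali_kbase_mem_linux.c hunks together, a sketch of how the page-usage helper reads once the tracking-page machinery is gone, again using the illustrative demo_ctx and calling the kernel's add_mm_counter() directly instead of the driver's kbasep_add_mm_counter() wrapper; the driver's per-context nonmapped_pages counter is also omitted here. Because the context holds an mmgrab() reference on process_mm for its whole lifetime, the pointer can be read without RCU protection.

#include <linux/mm.h>

static void demo_page_usage_update(struct demo_ctx *ctx, int pages)
{
	struct mm_struct *mm = ctx->process_mm;

	if (unlikely(!mm))
		return;

#ifdef SPLIT_RSS_COUNTING
	/* Counter update is safe without page_table_lock here. */
	add_mm_counter(mm, MM_FILEPAGES, pages);
#else
	spin_lock(&mm->page_table_lock);
	add_mm_counter(mm, MM_FILEPAGES, pages);
	spin_unlock(&mm->page_table_lock);
#endif
}
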