author | Ben Fennema <fennema@google.com> | 2023-08-14 12:35:26 -0700
committer | Ben Fennema <fennema@google.com> | 2023-08-14 12:35:51 -0700
commit | ab0e04a3cedcba1eca67bbf498cff4fa3582cc92 (patch)
tree | 3d4dd38dfce2afb02ba334edd14625bcc9fc00e7
parent | c4888e3b848f4a7d8f469fa1d7a147c553d55a67 (diff)
parent | eaa359f757545f9f1624b46e981954bdc910d71e (diff)
download | exynos-ab0e04a3cedcba1eca67bbf498cff4fa3582cc92.tar.gz
Merge branch 'partner/android-exynos-r11-4.19-rvc-jr3' into android-exynos-r11-4.19
OCT 2023.1
Bug: 287225144
Change-Id: Ic31bce79c4849438c36af5f5da10ad9f510197ad
-rw-r--r-- | drivers/gpu/arm/t72x/r29p0/mali_kbase_context.c | 37
-rw-r--r-- | drivers/gpu/arm/t72x/r29p0/mali_kbase_context.h | 5
-rw-r--r-- | drivers/gpu/arm/t72x/r29p0/mali_kbase_core_linux.c | 5
-rw-r--r-- | drivers/gpu/arm/t72x/r29p0/mali_kbase_defs.h | 19
-rw-r--r-- | drivers/gpu/arm/t72x/r29p0/mali_kbase_hwcnt_backend_gpu.c | 2
-rw-r--r-- | drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.c | 19
-rw-r--r-- | drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.h | 12
-rw-r--r-- | drivers/gpu/arm/t72x/r29p0/mali_kbase_mem_linux.c | 65
-rw-r--r-- | drivers/gpu/arm/t72x/r29p0/mali_kbase_mem_pool.c | 73
-rw-r--r-- | drivers/gpu/arm/t72x/r29p0/mali_kbase_mmu.c | 16
10 files changed, 158 insertions, 95 deletions
diff --git a/drivers/gpu/arm/t72x/r29p0/mali_kbase_context.c b/drivers/gpu/arm/t72x/r29p0/mali_kbase_context.c
index 6a8d1e024535..b51922179b0e 100644
--- a/drivers/gpu/arm/t72x/r29p0/mali_kbase_context.c
+++ b/drivers/gpu/arm/t72x/r29p0/mali_kbase_context.c
@@ -25,6 +25,12 @@
 /*
  * Base kernel context APIs
  */
+#include <linux/version.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/task.h>
+#else
+#include <linux/sched.h>
+#endif
 
 #include <mali_kbase.h>
 #include <mali_midg_regmap.h>
@@ -33,12 +39,17 @@
 #include <mali_kbase_ctx_sched.h>
 
 struct kbase_context *
-kbase_create_context(struct kbase_device *kbdev, bool is_compat)
+kbase_create_context(struct kbase_device *kbdev, bool is_compat,
+		     struct file *filp)
 {
 	struct kbase_context *kctx;
 	int err;
 	struct page *p;
+	struct pid *pid_struct;
+	struct task_struct *task;
+
 	KBASE_DEBUG_ASSERT(kbdev != NULL);
 
 	/* zero-inited as lot of code assume it's zero'ed out on create */
@@ -51,6 +62,7 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
 	kbase_disjoint_event(kbdev);
 
 	kctx->kbdev = kbdev;
+	kctx->filp = filp;
 	if (is_compat)
 		kbase_ctx_flag_set(kctx, KCTX_COMPAT);
 #if defined(CONFIG_64BIT)
@@ -60,13 +72,29 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
 	atomic_set(&kctx->setup_complete, 0);
 	atomic_set(&kctx->setup_in_progress, 0);
-	spin_lock_init(&kctx->mm_update_lock);
 	kctx->process_mm = NULL;
 	atomic_set(&kctx->nonmapped_pages, 0);
 	kctx->slots_pullable = 0;
 	kctx->tgid = current->tgid;
 	kctx->pid = current->pid;
 
+	/* Check if this is a Userspace created context */
+	if (likely(kctx->filp)) {
+		rcu_read_lock();
+		pid_struct = find_get_pid(kctx->tgid);
+		task = pid_task(pid_struct, PIDTYPE_PID);
+		get_task_struct(task);
+		kctx->task = task;
+		put_pid(pid_struct);
+		rcu_read_unlock();
+
+		/* This merely takes a reference on the mm_struct and not on the
+		 * address space and so won't block the freeing of address space
+		 * on process exit.
+		 */
+		mmgrab(current->mm);
+		kctx->process_mm = current->mm;
+	}
+
 	err = kbase_mem_pool_init(&kctx->mem_pool,
 			kbdev->mem_pool_max_size_default,
 			KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER,
@@ -323,6 +351,11 @@ void kbase_destroy_context(struct kbase_context *kctx)
 		kctx->ctx_need_qos = false;
 	}
 
+	if (likely(kctx->filp)) {
+		mmdrop(kctx->process_mm);
+		put_task_struct(kctx->task);
+	}
+
 	vfree(kctx);
 	/* MALI_SEC_INTEGRATION */
 	kctx = NULL;
diff --git a/drivers/gpu/arm/t72x/r29p0/mali_kbase_context.h b/drivers/gpu/arm/t72x/r29p0/mali_kbase_context.h
index 30b0f649806b..80f2a9c2f06e 100644
--- a/drivers/gpu/arm/t72x/r29p0/mali_kbase_context.h
+++ b/drivers/gpu/arm/t72x/r29p0/mali_kbase_context.h
@@ -29,13 +29,16 @@
  * kbase_create_context() - Create a kernel base context.
  * @kbdev:     Kbase device
  * @is_compat: Force creation of a 32-bit context
+ * @filp:      Pointer to the struct file corresponding to device file
+ *             /dev/malixx instance, passed to the file's open method.
  *
  * Allocate and init a kernel base context.
  *
  * Return: new kbase context
  */
 struct kbase_context *
-kbase_create_context(struct kbase_device *kbdev, bool is_compat);
+kbase_create_context(struct kbase_device *kbdev, bool is_compat,
+		     struct file *filp);
 
 /**
  * kbase_destroy_context - Destroy a kernel base context.
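The heart of this merge is the lifetime pattern just above: at context creation the driver pins the creating task (get_task_struct()) and its memory descriptor (mmgrab()), and releases both in kbase_destroy_context(). Below is a minimal self-contained sketch of the same pattern under assumed names (struct my_ctx and the two helpers are illustrative, not part of the driver; the NULL check after pid_task() is an extra defensive step that this patch happens to omit):

#include <linux/errno.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>	/* mmgrab(), mmdrop() */
#include <linux/sched/task.h>	/* get_task_struct(), put_task_struct() */

struct my_ctx {
	struct task_struct *task;	/* pinned creator task */
	struct mm_struct *process_mm;	/* pinned memory descriptor */
};

/* Call from the process that creates the context (e.g. the open() path). */
static int my_ctx_bind_owner(struct my_ctx *ctx)
{
	struct pid *pid_struct;
	struct task_struct *task;

	rcu_read_lock();
	pid_struct = find_get_pid(current->tgid);
	task = pid_task(pid_struct, PIDTYPE_PID);
	if (!task) {			/* defensive; see note above */
		put_pid(pid_struct);
		rcu_read_unlock();
		return -ESRCH;
	}
	get_task_struct(task);		/* task_struct outlives process exit */
	ctx->task = task;
	put_pid(pid_struct);
	rcu_read_unlock();

	/* mmgrab() pins only the mm_struct (mm_count), not the address
	 * space (mm_users), so it does not block address-space teardown
	 * on process exit -- exactly the property the patch relies on.
	 */
	mmgrab(current->mm);
	ctx->process_mm = current->mm;
	return 0;
}

static void my_ctx_unbind_owner(struct my_ctx *ctx)
{
	mmdrop(ctx->process_mm);
	put_task_struct(ctx->task);
}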
diff --git a/drivers/gpu/arm/t72x/r29p0/mali_kbase_core_linux.c b/drivers/gpu/arm/t72x/r29p0/mali_kbase_core_linux.c
index 6fd61a9dbef1..162a21e0a6c1 100644
--- a/drivers/gpu/arm/t72x/r29p0/mali_kbase_core_linux.c
+++ b/drivers/gpu/arm/t72x/r29p0/mali_kbase_core_linux.c
@@ -406,9 +406,9 @@ static int kbase_open(struct inode *inode, struct file *filp)
 		return -ENODEV;
 
 #if (KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE)
-	kctx = kbase_create_context(kbdev, in_compat_syscall());
+	kctx = kbase_create_context(kbdev, in_compat_syscall(), filp);
 #else
-	kctx = kbase_create_context(kbdev, is_compat_task());
+	kctx = kbase_create_context(kbdev, is_compat_task(), filp);
 #endif /* (KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE) */
 	if (!kctx) {
 		ret = -ENOMEM;
@@ -418,7 +418,6 @@ static int kbase_open(struct inode *inode, struct file *filp)
 	init_waitqueue_head(&kctx->event_queue);
 	filp->private_data = kctx;
 	filp->f_mode |= FMODE_UNSIGNED_OFFSET;
-	kctx->filp = filp;
 
 	if (kbdev->infinite_cache_active_default)
 		kbase_ctx_flag_set(kctx, KCTX_INFINITE_CACHE);
diff --git a/drivers/gpu/arm/t72x/r29p0/mali_kbase_defs.h b/drivers/gpu/arm/t72x/r29p0/mali_kbase_defs.h
index 12dd75f1181e..640fd5636ee1 100644
--- a/drivers/gpu/arm/t72x/r29p0/mali_kbase_defs.h
+++ b/drivers/gpu/arm/t72x/r29p0/mali_kbase_defs.h
@@ -1974,7 +1974,11 @@ struct kbase_reg_zone {
  * @process_mm:           Pointer to the memory descriptor of the process which
  *                        created the context. Used for accounting the physical
  *                        pages used for GPU allocations, done for the context,
- *                        to the memory consumed by the process.
+ *                        to the memory consumed by the process. A reference is
+ *                        taken on this descriptor for the Userspace created
+ *                        contexts so that Kbase can safely access it to update
+ *                        the memory usage counters. The reference is dropped on
+ *                        context termination.
  * @gpu_va_end:           End address of the GPU va space (in 4KB page units)
  * @jit_va:               Indicates if a JIT_VA zone has been created.
  * @timeline:             Object tracking the number of atoms currently in flight for
@@ -2071,6 +2075,8 @@ struct kbase_reg_zone {
  * @priority:             Indicates the context priority. Used along with @atoms_count
  *                        for context scheduling, protected by hwaccess_lock.
  * @atoms_count:          Number of gpu atoms currently in use, per priority
+ * @task:                 Pointer to the task structure of the main thread of
+ *                        the process that created the Kbase context.
  */
 struct kbase_context {
 	struct file *filp;
@@ -2136,14 +2142,7 @@ struct kbase_context {
 
 	atomic_t refcount;
 
-	/* NOTE:
-	 *
-	 * Flags are in jctx.sched_info.ctx.flags
-	 * Mutable flags *must* be accessed under jctx.sched_info.ctx.jsctx_mutex
-	 *
-	 * All other flags must be added there */
-	spinlock_t mm_update_lock;
-	struct mm_struct __rcu *process_mm;
+	struct mm_struct *process_mm;
 	u64 gpu_va_end;
 	bool jit_va;
@@ -2226,6 +2225,8 @@ struct kbase_context {
 	int priority;
 	s16 atoms_count[KBASE_JS_ATOM_SCHED_PRIO_COUNT];
 
+	struct task_struct *task;
+
 	/* MALI_SEC_INTEGRATION */
 	bool destroying_context;
 	atomic_t mem_profile_showing_state;
diff --git a/drivers/gpu/arm/t72x/r29p0/mali_kbase_hwcnt_backend_gpu.c b/drivers/gpu/arm/t72x/r29p0/mali_kbase_hwcnt_backend_gpu.c
index 3e35463548df..5327831e4a8c 100644
--- a/drivers/gpu/arm/t72x/r29p0/mali_kbase_hwcnt_backend_gpu.c
+++ b/drivers/gpu/arm/t72x/r29p0/mali_kbase_hwcnt_backend_gpu.c
@@ -353,7 +353,7 @@ static int kbasep_hwcnt_backend_gpu_create(
 
 	backend->info = info;
 
-	backend->kctx = kbase_create_context(kbdev, true);
+	backend->kctx = kbase_create_context(kbdev, true, NULL);
 	if (!backend->kctx)
 		goto alloc_error;
diff --git a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.c b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.c
index f98af64d2fbe..2fac9c910817 100644
--- a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.c
+++ b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.c
@@ -1962,9 +1962,10 @@ int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
 		int nr_lp = nr_left / (SZ_2M / SZ_4K);
 
 		res = kbase_mem_pool_alloc_pages(&kctx->lp_mem_pool,
-						 nr_lp * (SZ_2M / SZ_4K),
-						 tp,
-						 true);
+						 nr_lp * (SZ_2M / SZ_4K),
+						 tp,
+						 true,
+						 kctx->task);
 
 		if (res > 0) {
 			nr_left -= res;
@@ -2013,7 +2014,8 @@ int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
 					np = kbase_mem_pool_alloc(&kctx->lp_mem_pool);
 					if (np)
 						break;
-					err = kbase_mem_pool_grow(&kctx->lp_mem_pool, 1);
+					err = kbase_mem_pool_grow(&kctx->lp_mem_pool, 1,
+								  kctx->task);
 					if (err)
 						break;
 				} while (1);
@@ -2055,9 +2057,10 @@ no_new_partial:
 
 	if (nr_left) {
 		res = kbase_mem_pool_alloc_pages(&kctx->mem_pool,
-						 nr_left,
-						 tp,
-						 false);
+						 nr_left,
+						 tp,
+						 false,
+						 kctx->task);
 		if (res <= 0)
 			goto alloc_failed;
 	}
@@ -3161,7 +3164,7 @@ static int kbase_jit_grow(struct kbase_context *kctx,
 		spin_unlock(&kctx->mem_partials_lock);
 		kbase_gpu_vm_unlock(kctx);
 
-		if (kbase_mem_pool_grow(pool, pool_delta))
+		if (kbase_mem_pool_grow(pool, pool_delta, kctx->task))
 			goto update_failed_unlocked;
 
 		kbase_gpu_vm_lock(kctx);
diff --git a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.h b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.h
index d0d28271606c..4dccd403acb2 100644
--- a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.h
+++ b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.h
@@ -746,6 +746,9 @@ void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
  * @pages:      Pointer to array where the physical address of the allocated
  *              pages will be stored.
  * @partial_allowed: If fewer pages allocated is allowed
+ * @page_owner: Pointer to the task that created the Kbase context for which
+ *              the pages are being allocated. It can be NULL if the pages
+ *              won't be associated with any Kbase context.
  *
  * Like kbase_mem_pool_alloc() but optimized for allocating many pages.
  *
@@ -762,7 +765,8 @@ void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
  * this lock, it should use kbase_mem_pool_alloc_pages_locked() instead.
  */
 int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
-		struct tagged_addr *pages, bool partial_allowed);
+		struct tagged_addr *pages, bool partial_allowed,
+		struct task_struct *page_owner);
 
 /**
  * kbase_mem_pool_alloc_pages_locked - Allocate pages from memory pool
@@ -874,13 +878,17 @@ void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size);
  * kbase_mem_pool_grow - Grow the pool
  * @pool:       Memory pool to grow
  * @nr_to_grow: Number of pages to add to the pool
+ * @page_owner: Pointer to the task that created the Kbase context for which
+ *              the memory pool is being grown. It can be NULL if the pages
+ *              to be allocated won't be associated with any Kbase context.
  *
 * Adds @nr_to_grow pages to the pool. Note that this may cause the pool to
 * become larger than the maximum size specified.
 *
 * Returns: 0 on success, -ENOMEM if unable to allocate sufficent pages
 */
-int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow);
+int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow,
+		struct task_struct *page_owner);
 
 /**
  * kbase_mem_pool_trim - Grow or shrink the pool to a new size
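The new page_owner parameter simply threads the owning task (kctx->task) from the caller down to the allocator, so a kernel thread allocating on the context's behalf can detect that the owner is dying. A caller-side sketch under assumed names (demo_alloc_for_ctx is hypothetical; the kbase types and pool function are the ones declared above):

/* Hypothetical caller of the extended pool API declared above. */
static int demo_alloc_for_ctx(struct kbase_mem_pool *pool,
			      struct kbase_context *kctx,
			      size_t nr_pages, struct tagged_addr *pages)
{
	/* Kernel-created contexts (filp == NULL, e.g. the hwcnt backend)
	 * have no owning task; passing NULL keeps the old behaviour of
	 * retrying the allocation indefinitely.
	 */
	struct task_struct *owner = kctx ? kctx->task : NULL;
	int res;

	res = kbase_mem_pool_alloc_pages(pool, nr_pages, pages,
					 false /* partial_allowed */, owner);
	return (res < 0) ? res : 0;
}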
diff --git a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem_linux.c b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem_linux.c
index 7cdf510eed9a..2d40764df241 100644
--- a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem_linux.c
+++ b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem_linux.c
@@ -2502,73 +2502,25 @@ KBASE_EXPORT_TEST_API(kbase_vunmap);
 
 void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages)
 {
-	struct mm_struct *mm;
+	struct mm_struct *mm = kctx->process_mm;
 
-	rcu_read_lock();
-	mm = rcu_dereference(kctx->process_mm);
-	if (mm) {
-		atomic_add(pages, &kctx->nonmapped_pages);
-#ifdef SPLIT_RSS_COUNTING
-		add_mm_counter(mm, MM_FILEPAGES, pages);
-#else
-		spin_lock(&mm->page_table_lock);
-		add_mm_counter(mm, MM_FILEPAGES, pages);
-		spin_unlock(&mm->page_table_lock);
-#endif
-	}
-	rcu_read_unlock();
-}
-
-static void kbasep_os_process_page_usage_drain(struct kbase_context *kctx)
-{
-	int pages;
-	struct mm_struct *mm;
-
-	spin_lock(&kctx->mm_update_lock);
-	mm = rcu_dereference_protected(kctx->process_mm,
-			lockdep_is_held(&kctx->mm_update_lock));
-	if (!mm) {
-		spin_unlock(&kctx->mm_update_lock);
+	if (unlikely(!mm)) {
 		return;
 	}
-
-	rcu_assign_pointer(kctx->process_mm, NULL);
-	spin_unlock(&kctx->mm_update_lock);
-	synchronize_rcu();
-
-	pages = atomic_xchg(&kctx->nonmapped_pages, 0);
+	atomic_add(pages, &kctx->nonmapped_pages);
 #ifdef SPLIT_RSS_COUNTING
-	add_mm_counter(mm, MM_FILEPAGES, -pages);
+	add_mm_counter(mm, MM_FILEPAGES, pages);
 #else
 	spin_lock(&mm->page_table_lock);
-	add_mm_counter(mm, MM_FILEPAGES, -pages);
+	add_mm_counter(mm, MM_FILEPAGES, pages);
 	spin_unlock(&mm->page_table_lock);
 #endif
 }
 
-static void kbase_special_vm_close(struct vm_area_struct *vma)
-{
-	struct kbase_context *kctx;
-
-	kctx = vma->vm_private_data;
-	kbasep_os_process_page_usage_drain(kctx);
-}
-
-static const struct vm_operations_struct kbase_vm_special_ops = {
-	.close = kbase_special_vm_close,
-};
-
 static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma)
 {
-	/* check that this is the only tracking page */
-	spin_lock(&kctx->mm_update_lock);
-	if (rcu_dereference_protected(kctx->process_mm,
-			lockdep_is_held(&kctx->mm_update_lock))) {
-		spin_unlock(&kctx->mm_update_lock);
-		return -EFAULT;
-	}
-
-	rcu_assign_pointer(kctx->process_mm, current->mm);
-
-	spin_unlock(&kctx->mm_update_lock);
+	if (vma_pages(vma) != 1)
+		return -EINVAL;
 
 	/* no real access */
 	vma->vm_flags &= ~(VM_READ | VM_MAYREAD | VM_WRITE | VM_MAYWRITE |
			VM_EXEC | VM_MAYEXEC);
@@ -2577,9 +2529,6 @@ static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma)
 #else
 	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_IO;
 #endif
-	vma->vm_ops = &kbase_vm_special_ops;
-	vma->vm_private_data = kctx;
-
 	return 0;
 }
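With the context holding an mmgrab() reference for its entire lifetime, the accounting path above can read kctx->process_mm as a plain pointer, and the old RCU tracking-page dance (with its vm_ops close handler) becomes unnecessary. The same RSS-accounting idea in a standalone sketch (demo_account_gpu_pages is a hypothetical name; add_mm_counter(), MM_FILEPAGES and SPLIT_RSS_COUNTING are the kernel's own):

#include <linux/mm.h>		/* add_mm_counter(), MM_FILEPAGES */
#include <linux/spinlock.h>

/* Charge (or, with a negative @pages, refund) driver-owned pages against
 * the owning process's file-page RSS. Only safe while a reference on @mm
 * is held -- here, the mmgrab() taken at context creation.
 */
static void demo_account_gpu_pages(struct mm_struct *mm, int pages)
{
	if (unlikely(!mm))
		return;
#ifdef SPLIT_RSS_COUNTING
	add_mm_counter(mm, MM_FILEPAGES, pages);
#else
	/* Without split RSS counting, serialize counter updates. */
	spin_lock(&mm->page_table_lock);
	add_mm_counter(mm, MM_FILEPAGES, pages);
	spin_unlock(&mm->page_table_lock);
#endif
}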
diff --git a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem_pool.c b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem_pool.c
index 0f91be17a81b..3def6fbcc9da 100644
--- a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem_pool.c
+++ b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem_pool.c
@@ -28,6 +28,11 @@
 #include <linux/shrinker.h>
 #include <linux/atomic.h>
 #include <linux/version.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/signal.h>
+#else
+#include <linux/signal.h>
+#endif
 
 #define pool_dbg(pool, format, ...) \
 	dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format,	\
@@ -39,6 +44,52 @@
 #define NOT_DIRTY false
 #define NOT_RECLAIMED false
 
+/**
+ * can_alloc_page() - Check if the current thread can allocate a physical page
+ *
+ * @pool:                Pointer to the memory pool.
+ * @page_owner:          Pointer to the task/process that created the Kbase
+ *                       context for which a page needs to be allocated. It can
+ *                       be NULL if the page won't be associated with Kbase
+ *                       context.
+ * @alloc_from_kthread:  Flag indicating that the current thread is a kernel
+ *                       thread.
+ *
+ * This function checks if the current thread is a kernel thread and can make a
+ * request to kernel to allocate a physical page. If the kernel thread is
+ * allocating a page for the Kbase context and the process that created the
+ * context is exiting or is being killed, then there is no point in doing a
+ * page allocation.
+ *
+ * The check done by the function is particularly helpful when the system is
+ * running low on memory. When a page is allocated from the context of a kernel
+ * thread, OoM killer doesn't consider the kernel thread for killing and kernel
+ * keeps retrying to allocate the page as long as the OoM killer is able to
+ * kill processes. The check allows kernel thread to quickly exit the page
+ * allocation loop once OoM killer has initiated the killing of @page_owner,
+ * thereby unblocking the context termination for @page_owner and freeing of
+ * GPU memory allocated by it. This helps in preventing the kernel panic and
+ * also limits the number of innocent processes that get killed.
+ *
+ * Return: true if the page can be allocated otherwise false.
+ */
+static inline bool can_alloc_page(struct kbase_mem_pool *pool,
+				  struct task_struct *page_owner,
+				  const bool alloc_from_kthread)
+{
+	if (likely(!alloc_from_kthread || !page_owner))
+		return true;
+
+	if ((page_owner->flags & PF_EXITING) ||
+	    fatal_signal_pending(page_owner)) {
+		dev_info(pool->kbdev->dev, "%s : Process %s/%d exiting",
+			 __func__, page_owner->comm, task_pid_nr(page_owner));
+		return false;
+	}
+
+	return true;
+}
+
 static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
 {
 	ssize_t max_size = kbase_mem_pool_max_size(pool);
@@ -233,11 +284,12 @@ static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool,
 	return nr_freed;
 }
 
-int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
-		size_t nr_to_grow)
+int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow,
+		struct task_struct *page_owner)
 {
 	struct page *p;
 	size_t i;
+	const bool alloc_from_kthread = !!(current->flags & PF_KTHREAD);
 
 	kbase_mem_pool_lock(pool);
 
@@ -252,6 +304,10 @@ int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
 		}
 		kbase_mem_pool_unlock(pool);
 
+		if (unlikely(!can_alloc_page(pool, page_owner,
+					     alloc_from_kthread)))
+			return -ENOMEM;
+
 		p = kbase_mem_alloc_page(pool);
 		if (!p) {
 			kbase_mem_pool_lock(pool);
@@ -283,7 +339,7 @@ void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
 	if (new_size < cur_size)
 		kbase_mem_pool_shrink(pool, cur_size - new_size);
 	else if (new_size > cur_size)
-		err = kbase_mem_pool_grow(pool, new_size - cur_size);
+		err = kbase_mem_pool_grow(pool, new_size - cur_size, NULL);
 
 	if (err) {
 		size_t grown_size = kbase_mem_pool_size(pool);
@@ -539,13 +595,15 @@ void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
 }
 
 int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
-		struct tagged_addr *pages, bool partial_allowed)
+		struct tagged_addr *pages, bool partial_allowed,
+		struct task_struct *page_owner)
 {
 	struct page *p;
 	size_t nr_from_pool;
 	size_t i = 0;
 	int err = -ENOMEM;
 	size_t nr_pages_internal;
+	const bool alloc_from_kthread = !!(current->flags & PF_KTHREAD);
 
 	nr_pages_internal = nr_4k_pages / (1u << (pool->order));
 
@@ -577,7 +635,8 @@ int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
 	if (i != nr_4k_pages && pool->next_pool) {
 		/* Allocate via next pool */
 		err = kbase_mem_pool_alloc_pages(pool->next_pool,
-				nr_4k_pages - i, pages + i, partial_allowed);
+				nr_4k_pages - i, pages + i, partial_allowed,
+				page_owner);
 
 		if (err < 0)
 			goto err_rollback;
@@ -586,6 +645,10 @@ int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
 	} else {
 		/* Get any remaining pages from kernel */
 		while (i != nr_4k_pages) {
+			if (unlikely(!can_alloc_page(pool, page_owner,
+						     alloc_from_kthread)))
+				goto err_rollback;
+
 			p = kbase_mem_alloc_page(pool);
 			if (!p) {
 				if (partial_allowed)
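can_alloc_page() is the actual OoM-avoidance mechanism: a kernel thread allocating for a dying process stops early instead of letting the kernel retry while the OoM killer works through victims. The same bail-out in a generic, self-contained form (all demo_* names are hypothetical):

#include <linux/errno.h>
#include <linux/gfp.h>		/* alloc_page(), __free_page() */
#include <linux/mm.h>
#include <linux/sched.h>	/* PF_EXITING, PF_KTHREAD */
#include <linux/sched/signal.h>	/* fatal_signal_pending() */

static bool demo_owner_is_dying(struct task_struct *owner)
{
	return (owner->flags & PF_EXITING) || fatal_signal_pending(owner);
}

/* Allocate @n pages for @owner; give up (and roll back) as soon as the
 * owner is exiting, but only when running in a kernel thread -- a user
 * thread is itself a candidate for the OoM killer, so it may keep going.
 */
static int demo_kthread_alloc(struct page **pages, size_t n,
			      struct task_struct *owner)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if ((current->flags & PF_KTHREAD) && owner &&
		    demo_owner_is_dying(owner))
			goto rollback;

		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto rollback;
	}
	return 0;

rollback:
	while (i--)
		__free_page(pages[i]);
	return -ENOMEM;
}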
diff --git a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mmu.c b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mmu.c
index b7e2f03f7d4b..56f3d66a246d 100644
--- a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mmu.c
+++ b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mmu.c
@@ -843,11 +843,11 @@ page_fault_retry:
 					((1 << kctx->lp_mem_pool.order) - 1))
 						>> kctx->lp_mem_pool.order;
 				ret = kbase_mem_pool_grow(&kctx->lp_mem_pool,
-						pages_to_grow);
+						pages_to_grow, kctx->task);
 			} else {
 #endif
 				ret = kbase_mem_pool_grow(&kctx->mem_pool,
-						pages_to_grow);
+						pages_to_grow, kctx->task);
 #ifdef CONFIG_MALI_2MB_ALLOC
 			}
 #endif
@@ -1134,7 +1134,8 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
 		 */
 		mutex_unlock(&kctx->mmu.mmu_lock);
 		err = kbase_mem_pool_grow(&kctx->kbdev->mem_pool,
-				MIDGARD_MMU_BOTTOMLEVEL);
+				MIDGARD_MMU_BOTTOMLEVEL,
+				kctx ? kctx->task : NULL);
 		mutex_lock(&kctx->mmu.mmu_lock);
 	} while (!err);
 	if (err) {
@@ -1278,7 +1279,8 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
 		 */
 		mutex_unlock(&mmut->mmu_lock);
 		err = kbase_mem_pool_grow(&kbdev->mem_pool,
-				cur_level);
+				cur_level,
+				mmut->kctx ? mmut->kctx->task : NULL);
 		mutex_lock(&mmut->mmu_lock);
 	} while (!err);
 
@@ -1769,7 +1771,8 @@ static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
 		 */
 		mutex_unlock(&kctx->mmu.mmu_lock);
 		err = kbase_mem_pool_grow(&kctx->kbdev->mem_pool,
-				MIDGARD_MMU_BOTTOMLEVEL);
+				MIDGARD_MMU_BOTTOMLEVEL,
+				kctx ? kctx->task : NULL);
 		mutex_lock(&kctx->mmu.mmu_lock);
 	} while (!err);
 	if (err) {
@@ -1896,7 +1899,8 @@ int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
 	int err;
 
 	err = kbase_mem_pool_grow(&kbdev->mem_pool,
-			MIDGARD_MMU_BOTTOMLEVEL);
+			MIDGARD_MMU_BOTTOMLEVEL,
+			kctx ? kctx->task : NULL);
 	if (err) {
 		kbase_mmu_term(kbdev, mmut);
 		return -ENOMEM;
 	}