Diffstat (limited to 'mali_kbase/mali_kbase_mem_pool.c')
 -rw-r--r--  mali_kbase/mali_kbase_mem_pool.c  315
 1 file changed, 256 insertions, 59 deletions
diff --git a/mali_kbase/mali_kbase_mem_pool.c b/mali_kbase/mali_kbase_mem_pool.c
index c991adf..d942ff5 100644
--- a/mali_kbase/mali_kbase_mem_pool.c
+++ b/mali_kbase/mali_kbase_mem_pool.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
- * (C) COPYRIGHT 2015-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -21,12 +21,18 @@
#include <mali_kbase.h>
#include <linux/mm.h>
+#include <linux/migrate.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/shrinker.h>
#include <linux/atomic.h>
#include <linux/version.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/signal.h>
+#else
+#include <linux/signal.h>
+#endif
#define pool_dbg(pool, format, ...) \
dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format, \
@@ -70,6 +76,41 @@ static void kbase_mem_pool_ordered_add_array_spill_locked(
struct tagged_addr *pages, struct list_head *spillover_list,
bool zero, bool sync);
+/**
+ * can_alloc_page() - Check if the current thread can allocate a physical page
+ *
+ * @pool: Pointer to the memory pool.
+ * @page_owner: Pointer to the task/process that created the Kbase context
+ * for which a page needs to be allocated. It can be NULL if
+ * the page won't be associated with a Kbase context.
+ *
+ * This function checks if the current thread can make a request to the kernel to
+ * allocate a physical page. If the process that created the context is exiting or
+ * is being killed, then there is no point in doing a page allocation.
+ *
+ * The check done by this function is particularly helpful when the system is running
+ * low on memory. When a page is allocated from the context of a kernel thread, the OoM
+ * killer doesn't consider that kernel thread for killing and the kernel keeps retrying
+ * to allocate the page for as long as the OoM killer is able to kill processes.
+ * The check allows the page allocation loop to exit quickly once the OoM
+ * killer has initiated the killing of @page_owner, thereby unblocking context
+ * termination for @page_owner and the freeing of the GPU memory it allocated. This helps
+ * to prevent a kernel panic and also limits the number of innocent processes
+ * that get killed.
+ *
+ * Return: true if the page can be allocated, false otherwise.
+ */
+static inline bool can_alloc_page(struct kbase_mem_pool *pool, struct task_struct *page_owner)
+{
+ if (page_owner && ((page_owner->flags & PF_EXITING) || fatal_signal_pending(page_owner))) {
+ dev_info(pool->kbdev->dev, "%s : Process %s/%d exiting", __func__, page_owner->comm,
+ task_pid_nr(page_owner));
+ return false;
+ }
+
+ return true;
+}
+
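
For context, a minimal kernel-style sketch of how a bail-out check like can_alloc_page() is typically consumed by an allocation retry loop, similar to its use in kbase_mem_pool_grow() further down; the helper names below are illustrative, not the driver's:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>

/* Mirrors the can_alloc_page() logic above: give up once the owning
 * process is exiting or has a fatal signal pending.
 */
static bool example_owner_is_dying(struct task_struct *owner)
{
	return owner && ((owner->flags & PF_EXITING) || fatal_signal_pending(owner));
}

/* Hypothetical grow loop: check the owner before every trip to the
 * kernel page allocator so context termination is not held up.
 */
static int example_try_grow(struct task_struct *owner, size_t nr_pages)
{
	size_t i;

	for (i = 0; i < nr_pages; i++) {
		struct page *p;

		if (example_owner_is_dying(owner))
			return -ENOMEM;

		p = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
		if (!p)
			return -ENOMEM;
		__free_page(p); /* a real pool would keep the page */
	}
	return 0;
}
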
static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
{
ssize_t max_size = kbase_mem_pool_max_size(pool);
@@ -88,9 +129,47 @@ static bool kbase_mem_pool_is_empty(struct kbase_mem_pool *pool)
return kbase_mem_pool_size(pool) == 0;
}
+static bool set_pool_new_page_metadata(struct kbase_mem_pool *pool, struct page *p,
+ struct list_head *page_list, size_t *list_size)
+{
+ struct kbase_page_metadata *page_md = kbase_page_private(p);
+ bool not_movable = false;
+
+ lockdep_assert_held(&pool->pool_lock);
+
+ /* Free the page instead of adding it to the pool if it's not movable.
+ * Only update page status and add the page to the memory pool if
+ * it is not isolated.
+ */
+ if (!IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT))
+ not_movable = true;
+ else {
+ spin_lock(&page_md->migrate_lock);
+ if (PAGE_STATUS_GET(page_md->status) == (u8)NOT_MOVABLE) {
+ not_movable = true;
+ } else if (!WARN_ON_ONCE(IS_PAGE_ISOLATED(page_md->status))) {
+ page_md->status = PAGE_STATUS_SET(page_md->status, (u8)MEM_POOL);
+ page_md->data.mem_pool.pool = pool;
+ page_md->data.mem_pool.kbdev = pool->kbdev;
+ list_add(&p->lru, page_list);
+ (*list_size)++;
+ }
+ spin_unlock(&page_md->migrate_lock);
+ }
+
+ if (not_movable) {
+ kbase_free_page_later(pool->kbdev, p);
+ pool_dbg(pool, "skipping a non-movable page\n");
+ }
+
+ return not_movable;
+}
+
static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
struct page *p)
{
+ bool queue_work_to_free = false;
+
if (mali_kbase_mem_pool_order_pages_enabled) {
kbase_mem_pool_ordered_add_locked(pool, p);
return;
@@ -98,8 +177,19 @@ static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
lockdep_assert_held(&pool->pool_lock);
- list_add(&p->lru, &pool->page_list);
- pool->cur_size++;
+ if (!pool->order && kbase_is_page_migration_enabled()) {
+ if (set_pool_new_page_metadata(pool, p, &pool->page_list, &pool->cur_size))
+ queue_work_to_free = true;
+ } else {
+ list_add(&p->lru, &pool->page_list);
+ pool->cur_size++;
+ }
+
+ if (queue_work_to_free) {
+ struct kbase_mem_migrate *mem_migrate = &pool->kbdev->mem_migrate;
+
+ queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
+ }
pool_dbg(pool, "added page\n");
}
@@ -114,10 +204,28 @@ static void kbase_mem_pool_add(struct kbase_mem_pool *pool, struct page *p)
static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
struct list_head *page_list, size_t nr_pages)
{
+ bool queue_work_to_free = false;
+
lockdep_assert_held(&pool->pool_lock);
- list_splice(page_list, &pool->page_list);
- pool->cur_size += nr_pages;
+ if (!pool->order && kbase_is_page_migration_enabled()) {
+ struct page *p, *tmp;
+
+ list_for_each_entry_safe(p, tmp, page_list, lru) {
+ list_del_init(&p->lru);
+ if (set_pool_new_page_metadata(pool, p, &pool->page_list, &pool->cur_size))
+ queue_work_to_free = true;
+ }
+ } else {
+ list_splice(page_list, &pool->page_list);
+ pool->cur_size += nr_pages;
+ }
+
+ if (queue_work_to_free) {
+ struct kbase_mem_migrate *mem_migrate = &pool->kbdev->mem_migrate;
+
+ queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
+ }
pool_dbg(pool, "added %zu pages\n", nr_pages);
}
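
The hunks above hand non-movable pages to kbase_free_page_later() and then kick a dedicated workqueue to do the actual freeing. A hedged sketch of that general deferred-free pattern, with all names (deferred_free_ctx and friends) illustrative rather than the driver's:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct deferred_free_ctx {
	struct workqueue_struct *wq;   /* created with alloc_workqueue() */
	struct work_struct work;       /* set up with INIT_WORK()        */
	struct list_head free_list;    /* pages chained via page->lru    */
	spinlock_t lock;
};

/* Worker: drain the list locally, then free pages outside the lock. */
static void deferred_free_worker(struct work_struct *work)
{
	struct deferred_free_ctx *ctx = container_of(work, struct deferred_free_ctx, work);
	struct page *p, *tmp;
	LIST_HEAD(local);

	spin_lock(&ctx->lock);
	list_splice_init(&ctx->free_list, &local);
	spin_unlock(&ctx->lock);

	list_for_each_entry_safe(p, tmp, &local, lru) {
		list_del_init(&p->lru);
		__free_page(p);
	}
}

/* Caller side: queue the page and let the workqueue free it later. */
static void defer_free_page(struct deferred_free_ctx *ctx, struct page *p)
{
	spin_lock(&ctx->lock);
	list_add(&p->lru, &ctx->free_list);
	spin_unlock(&ctx->lock);
	queue_work(ctx->wq, &ctx->work);
}
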
@@ -130,7 +238,8 @@ static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool,
kbase_mem_pool_unlock(pool);
}
-static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
+static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool,
+ enum kbase_page_status status)
{
struct page *p;
@@ -140,6 +249,16 @@ static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
return NULL;
p = list_first_entry(&pool->page_list, struct page, lru);
+
+ if (!pool->order && kbase_is_page_migration_enabled()) {
+ struct kbase_page_metadata *page_md = kbase_page_private(p);
+
+ spin_lock(&page_md->migrate_lock);
+ WARN_ON(PAGE_STATUS_GET(page_md->status) != (u8)MEM_POOL);
+ page_md->status = PAGE_STATUS_SET(page_md->status, (u8)status);
+ spin_unlock(&page_md->migrate_lock);
+ }
+
list_del_init(&p->lru);
pool->cur_size--;
@@ -148,12 +267,13 @@ static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
return p;
}
-static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool)
+static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool,
+ enum kbase_page_status status)
{
struct page *p;
kbase_mem_pool_lock(pool);
- p = kbase_mem_pool_remove_locked(pool);
+ p = kbase_mem_pool_remove_locked(pool, status);
kbase_mem_pool_unlock(pool);
return p;
@@ -163,9 +283,9 @@ static void kbase_mem_pool_sync_page(struct kbase_mem_pool *pool,
struct page *p)
{
struct device *dev = pool->kbdev->dev;
+ dma_addr_t dma_addr = pool->order ? kbase_dma_addr_as_priv(p) : kbase_dma_addr(p);
- dma_sync_single_for_device(dev, kbase_dma_addr(p),
- (PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(dev, dma_addr, (PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
}
static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool,
@@ -196,7 +316,7 @@ static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool,
struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool)
{
struct page *p;
- gfp_t gfp = GFP_HIGHUSER | __GFP_ZERO;
+ gfp_t gfp = __GFP_ZERO;
struct kbase_device *const kbdev = pool->kbdev;
struct device *const dev = kbdev->dev;
dma_addr_t dma_addr;
@@ -204,7 +324,9 @@ struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool)
/* don't warn on higher order failures */
if (pool->order)
- gfp |= __GFP_NOWARN;
+ gfp |= GFP_HIGHUSER | __GFP_NOWARN;
+ else
+ gfp |= kbase_is_page_migration_enabled() ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
p = kbdev->mgm_dev->ops.mgm_alloc_page(kbdev->mgm_dev,
pool->group_id, gfp, pool->order);
@@ -220,30 +342,59 @@ struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool)
return NULL;
}
- WARN_ON(dma_addr != page_to_phys(p));
- for (i = 0; i < (1u << pool->order); i++)
- kbase_set_dma_addr(p+i, dma_addr + PAGE_SIZE * i);
+ /* Set up page metadata for 4KB pages when page migration is enabled */
+ if (!pool->order && kbase_is_page_migration_enabled()) {
+ INIT_LIST_HEAD(&p->lru);
+ if (!kbase_alloc_page_metadata(kbdev, p, dma_addr, pool->group_id)) {
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev, pool->group_id, p,
+ pool->order);
+ return NULL;
+ }
+ } else {
+ WARN_ON(dma_addr != page_to_phys(p));
+ for (i = 0; i < (1u << pool->order); i++)
+ kbase_set_dma_addr_as_priv(p + i, dma_addr + PAGE_SIZE * i);
+ }
return p;
}
-static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool,
- struct page *p)
+static void enqueue_free_pool_pages_work(struct kbase_mem_pool *pool)
{
- struct kbase_device *const kbdev = pool->kbdev;
- struct device *const dev = kbdev->dev;
- dma_addr_t dma_addr = kbase_dma_addr(p);
- int i;
+ struct kbase_mem_migrate *mem_migrate = &pool->kbdev->mem_migrate;
+
+ if (!pool->order && kbase_is_page_migration_enabled())
+ queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
+}
- dma_unmap_page(dev, dma_addr, (PAGE_SIZE << pool->order),
- DMA_BIDIRECTIONAL);
- for (i = 0; i < (1u << pool->order); i++)
- kbase_clear_dma_addr(p+i);
+void kbase_mem_pool_free_page(struct kbase_mem_pool *pool, struct page *p)
+{
+ struct kbase_device *kbdev;
+
+ if (WARN_ON(!pool))
+ return;
+ if (WARN_ON(!p))
+ return;
+
+ kbdev = pool->kbdev;
- kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev,
- pool->group_id, p, pool->order);
+ if (!pool->order && kbase_is_page_migration_enabled()) {
+ kbase_free_page_later(kbdev, p);
+ pool_dbg(pool, "page to be freed to kernel later\n");
+ } else {
+ int i;
+ dma_addr_t dma_addr = kbase_dma_addr_as_priv(p);
+
+ for (i = 0; i < (1u << pool->order); i++)
+ kbase_clear_dma_addr_as_priv(p + i);
+
+ dma_unmap_page(kbdev->dev, dma_addr, (PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
- pool_dbg(pool, "freed page to kernel\n");
+ kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev, pool->group_id, p, pool->order);
+
+ pool_dbg(pool, "freed page to kernel\n");
+ }
}
static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
@@ -255,10 +406,13 @@ static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
lockdep_assert_held(&pool->pool_lock);
for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) {
- p = kbase_mem_pool_remove_locked(pool);
+ p = kbase_mem_pool_remove_locked(pool, FREE_IN_PROGRESS);
kbase_mem_pool_free_page(pool, p);
}
+ /* Freeing of pages will be deferred when page migration is enabled. */
+ enqueue_free_pool_pages_work(pool);
+
return i;
}
@@ -274,8 +428,8 @@ static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool,
return nr_freed;
}
-int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
- size_t nr_to_grow)
+int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow,
+ struct task_struct *page_owner)
{
struct page *p;
size_t i;
@@ -293,6 +447,9 @@ int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
}
kbase_mem_pool_unlock(pool);
+ if (unlikely(!can_alloc_page(pool, page_owner)))
+ return -ENOMEM;
+
p = kbase_mem_alloc_page(pool);
if (!p) {
kbase_mem_pool_lock(pool);
@@ -310,6 +467,7 @@ int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
return 0;
}
+KBASE_EXPORT_TEST_API(kbase_mem_pool_grow);
void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
{
@@ -324,7 +482,7 @@ void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
if (new_size < cur_size)
kbase_mem_pool_shrink(pool, cur_size - new_size);
else if (new_size > cur_size)
- err = kbase_mem_pool_grow(pool, new_size - cur_size);
+ err = kbase_mem_pool_grow(pool, new_size - cur_size, NULL);
if (err) {
size_t grown_size = kbase_mem_pool_size(pool);
@@ -365,6 +523,9 @@ static unsigned long kbase_mem_pool_reclaim_count_objects(struct shrinker *s,
kbase_mem_pool_lock(pool);
if (pool->dont_reclaim && !pool->dying) {
kbase_mem_pool_unlock(pool);
+ /* Tell the shrinker core to skip reclaim
+ * even though freeable pages are available.
+ */
return 0;
}
pool_size = kbase_mem_pool_size(pool);
@@ -384,7 +545,10 @@ static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
kbase_mem_pool_lock(pool);
if (pool->dont_reclaim && !pool->dying) {
kbase_mem_pool_unlock(pool);
- return 0;
+ /* Tell the shrinker core that no reclaim can be made and
+ * that it should not attempt again for this reclaim context.
+ */
+ return SHRINK_STOP;
}
pool_dbg(pool, "reclaim scan %ld:\n", sc->nr_to_scan);
@@ -398,12 +562,9 @@ static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
return freed;
}
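
The comments added in the two reclaim hunks above rely on the shrinker contract: a count_objects callback returning 0 tells the core there is nothing to reclaim right now, while SHRINK_STOP from scan_objects tells it not to retry this shrinker within the current reclaim context. A minimal sketch of that contract (callback and variable names are illustrative):

#include <linux/shrinker.h>

static unsigned long example_count_objects(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long freeable = 0;	/* e.g. the current pool size */

	/* Returning 0 means "skip me"; scan_objects will not be called. */
	return freeable;
}

static unsigned long example_scan_objects(struct shrinker *s, struct shrink_control *sc)
{
	bool cannot_reclaim_now = true;	/* e.g. pool->dont_reclaim && !pool->dying */

	/* SHRINK_STOP: no progress possible, don't call again this pass. */
	if (cannot_reclaim_now)
		return SHRINK_STOP;

	/* Otherwise return the number of objects actually freed. */
	return 0;
}
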
-int kbase_mem_pool_init(struct kbase_mem_pool *pool,
- const struct kbase_mem_pool_config *config,
- unsigned int order,
- int group_id,
- struct kbase_device *kbdev,
- struct kbase_mem_pool *next_pool)
+int kbase_mem_pool_init(struct kbase_mem_pool *pool, const struct kbase_mem_pool_config *config,
+ unsigned int order, int group_id, struct kbase_device *kbdev,
+ struct kbase_mem_pool *next_pool)
{
if (WARN_ON(group_id < 0) ||
WARN_ON(group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS)) {
@@ -417,6 +578,7 @@ int kbase_mem_pool_init(struct kbase_mem_pool *pool,
pool->kbdev = kbdev;
pool->next_pool = next_pool;
pool->dying = false;
+ atomic_set(&pool->isolation_in_progress_cnt, 0);
spin_lock_init(&pool->pool_lock);
INIT_LIST_HEAD(&pool->page_list);
@@ -428,12 +590,17 @@ int kbase_mem_pool_init(struct kbase_mem_pool *pool,
* struct shrinker does not define batch
*/
pool->reclaim.batch = 0;
- register_shrinker(&pool->reclaim, "mali-mempool");
+#if KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE
+ register_shrinker(&pool->reclaim);
+#else
+ register_shrinker(&pool->reclaim, "mali-mem-pool");
+#endif
pool_dbg(pool, "initialized\n");
return 0;
}
+KBASE_EXPORT_TEST_API(kbase_mem_pool_init);
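
The #if/#else above handles the register_shrinker() signature change in Linux 6.0, which added a printf-style name argument. A compat wrapper in the same spirit could look like the sketch below (the wrapper name is hypothetical):

#include <linux/shrinker.h>
#include <linux/version.h>

static inline int example_register_shrinker(struct shrinker *s, const char *name)
{
#if KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE
	return register_shrinker(s);
#else
	return register_shrinker(s, "%s", name);
#endif
}
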
void kbase_mem_pool_mark_dying(struct kbase_mem_pool *pool)
{
@@ -465,15 +632,17 @@ void kbase_mem_pool_term(struct kbase_mem_pool *pool)
/* Zero pages first without holding the next_pool lock */
for (i = 0; i < nr_to_spill; i++) {
- p = kbase_mem_pool_remove_locked(pool);
- list_add(&p->lru, &spill_list);
+ p = kbase_mem_pool_remove_locked(pool, SPILL_IN_PROGRESS);
+ if (p)
+ list_add(&p->lru, &spill_list);
}
}
while (!kbase_mem_pool_is_empty(pool)) {
/* Free remaining pages to kernel */
- p = kbase_mem_pool_remove_locked(pool);
- list_add(&p->lru, &free_list);
+ p = kbase_mem_pool_remove_locked(pool, FREE_IN_PROGRESS);
+ if (p)
+ list_add(&p->lru, &free_list);
}
kbase_mem_pool_unlock(pool);
@@ -506,8 +675,19 @@ void kbase_mem_pool_term(struct kbase_mem_pool *pool)
kbase_mem_pool_free_page(pool, p);
}
+ /* Freeing of pages will be deferred when page migration is enabled. */
+ enqueue_free_pool_pages_work(pool);
+
+ /* Before returning, wait to make sure there are no pages undergoing page isolation
+ * which would still require a reference to this pool.
+ */
+ if (kbase_is_page_migration_enabled()) {
+ while (atomic_read(&pool->isolation_in_progress_cnt))
+ cpu_relax();
+ }
pool_dbg(pool, "terminated\n");
}
+KBASE_EXPORT_TEST_API(kbase_mem_pool_term);
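
kbase_mem_pool_term() above ends with a busy-wait on isolation_in_progress_cnt so the pool cannot be freed while a page-isolation callback still references it. A hedged sketch of that idiom (the counter and function names below are illustrative):

#include <linux/atomic.h>
#include <asm/processor.h>	/* cpu_relax() */

/* Incremented when isolation of a pool page starts, decremented when done. */
static atomic_t isolation_in_flight = ATOMIC_INIT(0);

static void example_wait_for_isolation_to_drain(void)
{
	/* Spin until no isolation is in flight; cpu_relax() hints the CPU
	 * that this is a busy-wait loop.
	 */
	while (atomic_read(&isolation_in_flight))
		cpu_relax();
}
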
struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
{
@@ -515,7 +695,7 @@ struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
do {
pool_dbg(pool, "alloc()\n");
- p = kbase_mem_pool_remove(pool);
+ p = kbase_mem_pool_remove(pool, ALLOCATE_IN_PROGRESS);
if (p)
return p;
@@ -528,17 +708,10 @@ struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
struct page *kbase_mem_pool_alloc_locked(struct kbase_mem_pool *pool)
{
- struct page *p;
-
lockdep_assert_held(&pool->pool_lock);
pool_dbg(pool, "alloc_locked()\n");
- p = kbase_mem_pool_remove_locked(pool);
-
- if (p)
- return p;
-
- return NULL;
+ return kbase_mem_pool_remove_locked(pool, ALLOCATE_IN_PROGRESS);
}
void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
@@ -565,6 +738,8 @@ void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
} else {
/* Free page */
kbase_mem_pool_free_page(pool, p);
+ /* Freeing of pages will be deferred when page migration is enabled. */
+ enqueue_free_pool_pages_work(pool);
}
}
@@ -589,11 +764,14 @@ void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
} else {
/* Free page */
kbase_mem_pool_free_page(pool, p);
+ /* Freeing of pages will be deferred when page migration is enabled. */
+ enqueue_free_pool_pages_work(pool);
}
}
int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
- struct tagged_addr *pages, bool partial_allowed)
+ struct tagged_addr *pages, bool partial_allowed,
+ struct task_struct *page_owner)
{
struct page *p;
size_t nr_from_pool;
@@ -612,10 +790,12 @@ int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
/* Get pages from this pool */
kbase_mem_pool_lock(pool);
nr_from_pool = min(nr_pages_internal, kbase_mem_pool_size(pool));
+
while (nr_from_pool--) {
int j;
- p = kbase_mem_pool_remove_locked(pool);
+ p = kbase_mem_pool_remove_locked(pool, ALLOCATE_IN_PROGRESS);
+
if (pool->order) {
pages[i++] = as_tagged_tag(page_to_phys(p),
HUGE_HEAD | HUGE_PAGE);
@@ -631,8 +811,8 @@ int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
if (i != nr_4k_pages && pool->next_pool) {
/* Allocate via next pool */
- err = kbase_mem_pool_alloc_pages(pool->next_pool,
- nr_4k_pages - i, pages + i, partial_allowed);
+ err = kbase_mem_pool_alloc_pages(pool->next_pool, nr_4k_pages - i, pages + i,
+ partial_allowed, page_owner);
if (err < 0)
goto err_rollback;
@@ -641,6 +821,9 @@ int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
} else {
/* Get any remaining pages from kernel */
while (i != nr_4k_pages) {
+ if (unlikely(!can_alloc_page(pool, page_owner)))
+ goto err_rollback;
+
p = kbase_mem_alloc_page(pool);
if (!p) {
if (partial_allowed)
@@ -674,6 +857,9 @@ done:
err_rollback:
kbase_mem_pool_free_pages(pool, i, pages, NOT_DIRTY, NOT_RECLAIMED);
+ dev_warn(pool->kbdev->dev,
+ "Failed allocation request for remaining %zu pages after obtaining %zu pages already.\n",
+ nr_4k_pages - i, i);
return err;
}
@@ -703,7 +889,7 @@ int kbase_mem_pool_alloc_pages_locked(struct kbase_mem_pool *pool,
for (i = 0; i < nr_pages_internal; i++) {
int j;
- p = kbase_mem_pool_remove_locked(pool);
+ p = kbase_mem_pool_remove_locked(pool, ALLOCATE_IN_PROGRESS);
if (pool->order) {
*pages++ = as_tagged_tag(page_to_phys(p),
HUGE_HEAD | HUGE_PAGE);
@@ -810,6 +996,7 @@ void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
size_t nr_to_pool;
LIST_HEAD(to_pool_list);
size_t i = 0;
+ bool pages_released = false;
if (mali_kbase_mem_pool_order_pages_enabled) {
kbase_mem_pool_ordered_free_pages(pool, nr_pages, pages, dirty,
@@ -848,13 +1035,17 @@ void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
pages[i] = as_tagged(0);
continue;
}
-
p = as_page(pages[i]);
kbase_mem_pool_free_page(pool, p);
pages[i] = as_tagged(0);
+ pages_released = true;
}
+ /* Freeing of pages will be deferred when page migration is enabled. */
+ if (pages_released)
+ enqueue_free_pool_pages_work(pool);
+
pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);
}
@@ -867,6 +1058,7 @@ void kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool,
size_t nr_to_pool;
LIST_HEAD(to_pool_list);
size_t i = 0;
+ bool pages_released = false;
if (mali_kbase_mem_pool_order_pages_enabled) {
kbase_mem_pool_ordered_free_pages_locked(pool, nr_pages, pages,
@@ -903,8 +1095,13 @@ void kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool,
kbase_mem_pool_free_page(pool, p);
pages[i] = as_tagged(0);
+ pages_released = true;
}
+ /* Freeing of pages will be deferred when page migration is enabled. */
+ if (pages_released)
+ enqueue_free_pool_pages_work(pool);
+
pool_dbg(pool, "free_pages_locked(%zu) done\n", nr_pages);
}