author    Todd Frederick <tfred@google.com>  2023-08-12 18:35:50 +0000
committer Todd Frederick <tfred@google.com>  2023-08-13 20:36:41 +0000
commit    ff60593b5795bfd8029b8c62288b833ba26d88ed (patch)
tree      f3821f88dfd09143842a3ca5f386eaa0ccd468e4
parent    07e8f4e40feb3c8535374ae990afcd8bd8eb4ce0 (diff)
download  exynos-ff60593b5795bfd8029b8c62288b833ba26d88ed.tar.gz
gpu: r29p0: Security Errata 2871751-2871746
Bug: 295041809
Change-Id: I8f183df682362a9c5e5d967ec0ca22444a082dae
Signed-off-by: Todd Frederick <tfred@google.com>
-rw-r--r--  drivers/gpu/arm/t72x/r29p0/mali_kbase_context.c  | 23
-rw-r--r--  drivers/gpu/arm/t72x/r29p0/mali_kbase_defs.h     |  4
-rw-r--r--  drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.c      | 19
-rw-r--r--  drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.h      | 12
-rw-r--r--  drivers/gpu/arm/t72x/r29p0/mali_kbase_mem_pool.c | 73
-rw-r--r--  drivers/gpu/arm/t72x/r29p0/mali_kbase_mmu.c      | 16
6 files changed, 126 insertions(+), 21 deletions(-)
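
In short, the patch pins a reference to the creating process's task_struct at context creation and uses it so that kernel-thread page allocations bail out once that process is dying. A minimal sketch of the check this patch introduces, condensed from can_alloc_page() in the diff below (the standalone helper name here is illustrative, not the driver's):

#include <linux/sched.h>
#include <linux/sched/signal.h>

/*
 * Sketch of the bail-out added by this patch: a kernel thread that is
 * allocating pages on behalf of a user process gives up once that
 * process is exiting or has a fatal signal pending, instead of looping
 * on allocation while the OoM killer reclaims memory.
 */
static bool owner_can_receive_pages(struct task_struct *page_owner)
{
	/* Only kernel threads allocating for a Kbase context need the
	 * check; user-space callers allocate on their own behalf. */
	if (!(current->flags & PF_KTHREAD) || !page_owner)
		return true;

	if ((page_owner->flags & PF_EXITING) ||
	    fatal_signal_pending(page_owner))
		return false;	/* owner is dying; stop allocating */

	return true;
}
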
diff --git a/drivers/gpu/arm/t72x/r29p0/mali_kbase_context.c b/drivers/gpu/arm/t72x/r29p0/mali_kbase_context.c
index 6a8d1e024535..e767337e0c36 100644
--- a/drivers/gpu/arm/t72x/r29p0/mali_kbase_context.c
+++ b/drivers/gpu/arm/t72x/r29p0/mali_kbase_context.c
@@ -25,6 +25,12 @@
/*
* Base kernel context APIs
*/
+#include <linux/version.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/task.h>
+#else
+#include <linux/sched.h>
+#endif
#include <mali_kbase.h>
#include <mali_midg_regmap.h>
@@ -39,6 +45,10 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
int err;
struct page *p;
+ struct pid *pid_struct;
+ struct task_struct *task;
+
+
KBASE_DEBUG_ASSERT(kbdev != NULL);
/* zero-inited as a lot of code assumes it's zeroed out on create */
@@ -67,6 +77,16 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
kctx->tgid = current->tgid;
kctx->pid = current->pid;
+
+ rcu_read_lock();
+ pid_struct = find_get_pid(kctx->tgid);
+ task = pid_task(pid_struct, PIDTYPE_PID);
+ get_task_struct(task);
+ kctx->task = task;
+ put_pid(pid_struct);
+ rcu_read_unlock();
+
+
err = kbase_mem_pool_init(&kctx->mem_pool,
kbdev->mem_pool_max_size_default,
KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER,
@@ -323,6 +343,9 @@ void kbase_destroy_context(struct kbase_context *kctx)
kctx->ctx_need_qos = false;
}
+
+ put_task_struct(kctx->task);
+
vfree(kctx);
/* MALI_SEC_INTEGRATION */
kctx = NULL;
diff --git a/drivers/gpu/arm/t72x/r29p0/mali_kbase_defs.h b/drivers/gpu/arm/t72x/r29p0/mali_kbase_defs.h
index 12dd75f1181e..528a0425f427 100644
--- a/drivers/gpu/arm/t72x/r29p0/mali_kbase_defs.h
+++ b/drivers/gpu/arm/t72x/r29p0/mali_kbase_defs.h
@@ -2071,6 +2071,8 @@ struct kbase_reg_zone {
* @priority: Indicates the context priority. Used along with @atoms_count
* for context scheduling, protected by hwaccess_lock.
* @atoms_count: Number of gpu atoms currently in use, per priority
+ * @task: Pointer to the task structure of the main thread of
+ * the process that created the Kbase context.
*/
struct kbase_context {
struct file *filp;
@@ -2226,6 +2228,8 @@ struct kbase_context {
int priority;
s16 atoms_count[KBASE_JS_ATOM_SCHED_PRIO_COUNT];
+ struct task_struct *task;
+
/* MALI_SEC_INTEGRATION */
bool destroying_context;
atomic_t mem_profile_showing_state;
diff --git a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.c b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.c
index f98af64d2fbe..2fac9c910817 100644
--- a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.c
+++ b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.c
@@ -1962,9 +1962,10 @@ int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
int nr_lp = nr_left / (SZ_2M / SZ_4K);
res = kbase_mem_pool_alloc_pages(&kctx->lp_mem_pool,
- nr_lp * (SZ_2M / SZ_4K),
- tp,
- true);
+ nr_lp * (SZ_2M / SZ_4K),
+ tp,
+ true,
+ kctx->task);
if (res > 0) {
nr_left -= res;
@@ -2013,7 +2014,8 @@ int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
np = kbase_mem_pool_alloc(&kctx->lp_mem_pool);
if (np)
break;
- err = kbase_mem_pool_grow(&kctx->lp_mem_pool, 1);
+ err = kbase_mem_pool_grow(&kctx->lp_mem_pool, 1,
+ kctx->task);
if (err)
break;
} while (1);
@@ -2055,9 +2057,10 @@ no_new_partial:
if (nr_left) {
res = kbase_mem_pool_alloc_pages(&kctx->mem_pool,
- nr_left,
- tp,
- false);
+ nr_left,
+ tp,
+ false,
+ kctx->task);
if (res <= 0)
goto alloc_failed;
}
@@ -3161,7 +3164,7 @@ static int kbase_jit_grow(struct kbase_context *kctx,
spin_unlock(&kctx->mem_partials_lock);
kbase_gpu_vm_unlock(kctx);
- if (kbase_mem_pool_grow(pool, pool_delta))
+ if (kbase_mem_pool_grow(pool, pool_delta, kctx->task))
goto update_failed_unlocked;
kbase_gpu_vm_lock(kctx);
diff --git a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.h b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.h
index d0d28271606c..4dccd403acb2 100644
--- a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.h
+++ b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem.h
@@ -746,6 +746,9 @@ void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
* @pages: Pointer to array where the physical address of the allocated
* pages will be stored.
* @partial_allowed: If allocating fewer pages than requested is allowed
+ * @page_owner: Pointer to the task that created the Kbase context for which
+ * the pages are being allocated. It can be NULL if the pages
+ * won't be associated with any Kbase context.
*
* Like kbase_mem_pool_alloc() but optimized for allocating many pages.
*
@@ -762,7 +765,8 @@ void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
* this lock, it should use kbase_mem_pool_alloc_pages_locked() instead.
*/
int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
- struct tagged_addr *pages, bool partial_allowed);
+ struct tagged_addr *pages, bool partial_allowed,
+ struct task_struct *page_owner);
/**
* kbase_mem_pool_alloc_pages_locked - Allocate pages from memory pool
@@ -874,13 +878,17 @@ void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size);
* kbase_mem_pool_grow - Grow the pool
* @pool: Memory pool to grow
* @nr_to_grow: Number of pages to add to the pool
+ * @page_owner: Pointer to the task that created the Kbase context for which
+ * the memory pool is being grown. It can be NULL if the pages
+ * to be allocated won't be associated with any Kbase context.
*
* Adds @nr_to_grow pages to the pool. Note that this may cause the pool to
* become larger than the maximum size specified.
*
* Returns: 0 on success, -ENOMEM if unable to allocate sufficient pages
*/
-int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow);
+int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow,
+ struct task_struct *page_owner);
/**
* kbase_mem_pool_trim - Grow or shrink the pool to a new size
diff --git a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem_pool.c b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem_pool.c
index 0f91be17a81b..3def6fbcc9da 100644
--- a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem_pool.c
+++ b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mem_pool.c
@@ -28,6 +28,11 @@
#include <linux/shrinker.h>
#include <linux/atomic.h>
#include <linux/version.h>
+#if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
+#include <linux/sched/signal.h>
+#else
+#include <linux/signal.h>
+#endif
#define pool_dbg(pool, format, ...) \
dev_dbg(pool->kbdev->dev, "%s-pool [%zu/%zu]: " format, \
@@ -39,6 +44,52 @@
#define NOT_DIRTY false
#define NOT_RECLAIMED false
+/**
+ * can_alloc_page() - Check if the current thread can allocate a physical page
+ *
+ * @pool:               Pointer to the memory pool.
+ * @page_owner:         Pointer to the task/process that created the Kbase
+ *                      context for which a page needs to be allocated. It can
+ *                      be NULL if the page won't be associated with any Kbase
+ *                      context.
+ * @alloc_from_kthread: Flag indicating that the current thread is a kernel
+ *                      thread.
+ *
+ * This function checks if the current thread is a kernel thread and can make a
+ * request to the kernel to allocate a physical page. If the kernel thread is
+ * allocating a page for a Kbase context and the process that created the
+ * context is exiting or is being killed, then there is no point in doing the
+ * page allocation.
+ *
+ * The check done by this function is particularly helpful when the system is
+ * running low on memory. When a page is allocated from the context of a kernel
+ * thread, the OoM killer doesn't consider the kernel thread for killing and
+ * the kernel keeps retrying the page allocation as long as the OoM killer is
+ * able to kill processes. The check allows the kernel thread to quickly exit
+ * the page allocation loop once the OoM killer has initiated the killing of
+ * @page_owner, thereby unblocking context termination for @page_owner and the
+ * freeing of the GPU memory it allocated. This helps prevent a kernel panic
+ * and also limits the number of innocent processes that get killed.
+ *
+ * Return: true if the page can be allocated, false otherwise.
+ */
+static inline bool can_alloc_page(struct kbase_mem_pool *pool,
+ struct task_struct *page_owner,
+ const bool alloc_from_kthread)
+{
+ if (likely(!alloc_from_kthread || !page_owner))
+ return true;
+
+ if ((page_owner->flags & PF_EXITING) ||
+ fatal_signal_pending(page_owner)) {
+ dev_info(pool->kbdev->dev, "%s : Process %s/%d exiting",
+ __func__, page_owner->comm, task_pid_nr(page_owner));
+ return false;
+ }
+
+ return true;
+}
+
static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
{
ssize_t max_size = kbase_mem_pool_max_size(pool);
@@ -233,11 +284,12 @@ static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool,
return nr_freed;
}
-int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
- size_t nr_to_grow)
+int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow,
+ struct task_struct *page_owner)
{
struct page *p;
size_t i;
+ const bool alloc_from_kthread = !!(current->flags & PF_KTHREAD);
kbase_mem_pool_lock(pool);
@@ -252,6 +304,10 @@ int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
}
kbase_mem_pool_unlock(pool);
+ if (unlikely(!can_alloc_page(pool, page_owner,
+ alloc_from_kthread)))
+ return -ENOMEM;
+
p = kbase_mem_alloc_page(pool);
if (!p) {
kbase_mem_pool_lock(pool);
@@ -283,7 +339,7 @@ void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
if (new_size < cur_size)
kbase_mem_pool_shrink(pool, cur_size - new_size);
else if (new_size > cur_size)
- err = kbase_mem_pool_grow(pool, new_size - cur_size);
+ err = kbase_mem_pool_grow(pool, new_size - cur_size, NULL);
if (err) {
size_t grown_size = kbase_mem_pool_size(pool);
@@ -539,13 +595,15 @@ void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
}
int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
- struct tagged_addr *pages, bool partial_allowed)
+ struct tagged_addr *pages, bool partial_allowed,
+ struct task_struct *page_owner)
{
struct page *p;
size_t nr_from_pool;
size_t i = 0;
int err = -ENOMEM;
size_t nr_pages_internal;
+ const bool alloc_from_kthread = !!(current->flags & PF_KTHREAD);
nr_pages_internal = nr_4k_pages / (1u << (pool->order));
@@ -577,7 +635,8 @@ int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
if (i != nr_4k_pages && pool->next_pool) {
/* Allocate via next pool */
err = kbase_mem_pool_alloc_pages(pool->next_pool,
- nr_4k_pages - i, pages + i, partial_allowed);
+ nr_4k_pages - i, pages + i, partial_allowed,
+ page_owner);
if (err < 0)
goto err_rollback;
@@ -586,6 +645,10 @@ int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
} else {
/* Get any remaining pages from kernel */
while (i != nr_4k_pages) {
+ if (unlikely(!can_alloc_page(pool, page_owner,
+ alloc_from_kthread)))
+ goto err_rollback;
+
p = kbase_mem_alloc_page(pool);
if (!p) {
if (partial_allowed)
diff --git a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mmu.c b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mmu.c
index b7e2f03f7d4b..56f3d66a246d 100644
--- a/drivers/gpu/arm/t72x/r29p0/mali_kbase_mmu.c
+++ b/drivers/gpu/arm/t72x/r29p0/mali_kbase_mmu.c
@@ -843,11 +843,11 @@ page_fault_retry:
((1 << kctx->lp_mem_pool.order) - 1))
>> kctx->lp_mem_pool.order;
ret = kbase_mem_pool_grow(&kctx->lp_mem_pool,
- pages_to_grow);
+ pages_to_grow, kctx->task);
} else {
#endif
ret = kbase_mem_pool_grow(&kctx->mem_pool,
- pages_to_grow);
+ pages_to_grow, kctx->task);
#ifdef CONFIG_MALI_2MB_ALLOC
}
#endif
@@ -1134,7 +1134,8 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
*/
mutex_unlock(&kctx->mmu.mmu_lock);
err = kbase_mem_pool_grow(&kctx->kbdev->mem_pool,
- MIDGARD_MMU_BOTTOMLEVEL);
+ MIDGARD_MMU_BOTTOMLEVEL,
+ kctx ? kctx->task : NULL);
mutex_lock(&kctx->mmu.mmu_lock);
} while (!err);
if (err) {
@@ -1278,7 +1279,8 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
*/
mutex_unlock(&mmut->mmu_lock);
err = kbase_mem_pool_grow(&kbdev->mem_pool,
- cur_level);
+ cur_level,
+ mmut->kctx ? mmut->kctx->task : NULL);
mutex_lock(&mmut->mmu_lock);
} while (!err);
@@ -1769,7 +1771,8 @@ static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
*/
mutex_unlock(&kctx->mmu.mmu_lock);
err = kbase_mem_pool_grow(&kctx->kbdev->mem_pool,
- MIDGARD_MMU_BOTTOMLEVEL);
+ MIDGARD_MMU_BOTTOMLEVEL,
+ kctx ? kctx->task : NULL);
mutex_lock(&kctx->mmu.mmu_lock);
} while (!err);
if (err) {
@@ -1896,7 +1899,8 @@ int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
int err;
err = kbase_mem_pool_grow(&kbdev->mem_pool,
- MIDGARD_MMU_BOTTOMLEVEL);
+ MIDGARD_MMU_BOTTOMLEVEL,
+ kctx ? kctx->task : NULL);
if (err) {
kbase_mmu_term(kbdev, mmut);
return -ENOMEM;
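
For reference, the new kctx->task pointer follows the standard get/put lifetime pattern. A condensed sketch of the acquisition done in kbase_create_context() above (the helper name is illustrative, and a defensive NULL check is added here that the diff itself does not include, since in the driver the caller is always the task being looked up):

#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>

/*
 * Resolve the creating process's task_struct from its tgid and pin it
 * so the pointer stays valid for the whole context lifetime.
 */
static struct task_struct *grab_owner_task(pid_t tgid)
{
	struct pid *pid_struct;
	struct task_struct *task;

	rcu_read_lock();
	pid_struct = find_get_pid(tgid);
	task = pid_task(pid_struct, PIDTYPE_PID);	/* NULL if gone */
	if (task)
		get_task_struct(task);	/* pin for the context lifetime */
	put_pid(pid_struct);		/* drop the extra pid reference */
	rcu_read_unlock();

	return task;	/* caller releases with put_task_struct() */
}

The matching release is visible in the first hunk of the diff: kbase_destroy_context() drops the reference with put_task_struct(kctx->task).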