author    Fred Lundquist <flundquist@google.com>    2024-02-14 23:24:23 +0000
committer Android Build Coastguard Worker <android-build-coastguard-worker@google.com>    2024-03-03 01:43:55 +0000
commit    11b1f1b4966dad0fadb771527e2ddfbb18e5885d (patch)
tree      7c012826989fb2893f31f551c67c10448f96f7ef
parent    f48579d406bbd5fbc2b29f3107c2f40217a95aae (diff)
download  graphics-11b1f1b4966dad0fadb771527e2ddfbb18e5885d.tar.gz
Merge KGSL fixes for May 2024 SMR into android13-msm-pixelwatch-5.15

msm: kgsl: Do not free sharedmem if it cannot be unmapped

Bug: 324446026
Bug: 318393843
Signed-off-by: Fred Lundquist <flundquist@google.com>
(cherry picked from commit bad264f896099d73edb52172594bdf8cba1172a1)
(cherry picked from https://partner-android-review.googlesource.com/q/commit:6b7ddcfdb4a0ef731aabc8e0cf73f7993a02e203)
Merged-In: Ie6922a3d63be6c2494567167378038ad45fe06aa
Change-Id: Ie6922a3d63be6c2494567167378038ad45fe06aa
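At a glance, the bug class this commit addresses: kgsl_mmu_unmap() and kgsl_mmu_unmap_range() previously ignored failures from the SMMU backend, so a failed unmap still fell through to the free path and the backing pages were returned to the allocator while the GPU could retain a live translation to them. The fix propagates the unmap error, and on failure marks the memdesc with KGSL_MEMDESC_MAPPED so every free path leaks the allocation instead of releasing it. Below is a minimal, self-contained C sketch of the unmap side; the types and helpers are simplified stand-ins for the driver's kgsl_pagetable and kgsl_memdesc, not the real definitions.

#include <stdatomic.h>

/* Simplified stand-ins for the driver's types (illustrative only). */
struct pt_stats {
	atomic_int entries;
	atomic_long mapped;
};

struct pagetable {
	struct pt_stats stats;
	/* stand-in for pagetable->pt_ops->mmu_unmap() */
	int (*mmu_unmap)(struct pagetable *pt, void *memdesc);
};

/*
 * Mirrors the kgsl_mmu_unmap() change: bail out before touching the
 * accounting if the backend could not remove the mapping, and hand
 * the error to the caller so it knows the pages must not be freed.
 */
static int mmu_unmap(struct pagetable *pt, void *memdesc, long size)
{
	int ret = pt->mmu_unmap(pt, memdesc);

	if (ret)
		return ret;

	atomic_fetch_sub(&pt->stats.entries, 1);
	atomic_fetch_sub(&pt->stats.mapped, size);
	return 0;
}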
-rw-r--r--  kgsl_mmu.c       |  5
-rw-r--r--  kgsl_sharedmem.c | 22
-rw-r--r--  kgsl_vbo.c       | 60
3 files changed, 71 insertions(+), 16 deletions(-)
diff --git a/kgsl_mmu.c b/kgsl_mmu.c
index 8992d72..b55714a 100644
--- a/kgsl_mmu.c
+++ b/kgsl_mmu.c
@@ -457,6 +457,8 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
size = kgsl_memdesc_footprint(memdesc);
ret = pagetable->pt_ops->mmu_unmap(pagetable, memdesc);
+ if (ret)
+ return ret;
atomic_dec(&pagetable->stats.entries);
atomic_long_sub(size, &pagetable->stats.mapped);
@@ -486,7 +488,8 @@ kgsl_mmu_unmap_range(struct kgsl_pagetable *pagetable,
ret = pagetable->pt_ops->mmu_unmap_range(pagetable, memdesc,
offset, length);
- atomic_long_sub(length, &pagetable->stats.mapped);
+ if (!ret)
+ atomic_long_sub(length, &pagetable->stats.mapped);
}
return ret;
diff --git a/kgsl_sharedmem.c b/kgsl_sharedmem.c
index 5ee7e07..979a129 100644
--- a/kgsl_sharedmem.c
+++ b/kgsl_sharedmem.c
@@ -986,6 +986,9 @@ static void kgsl_contiguous_free(struct kgsl_memdesc *memdesc)
if (!memdesc->hostptr)
return;
+ if (memdesc->priv & KGSL_MEMDESC_MAPPED)
+ return;
+
atomic_long_sub(memdesc->size, &kgsl_driver.stats.coherent);
_kgsl_contiguous_free(memdesc);
@@ -1198,6 +1201,9 @@ static void kgsl_free_pages(struct kgsl_memdesc *memdesc)
kgsl_paged_unmap_kernel(memdesc);
WARN_ON(memdesc->hostptr);
+ if (memdesc->priv & KGSL_MEMDESC_MAPPED)
+ return;
+
atomic_long_sub(memdesc->size, &kgsl_driver.stats.page_alloc);
_kgsl_free_pages(memdesc, memdesc->page_count);
@@ -1216,6 +1222,9 @@ static void kgsl_free_system_pages(struct kgsl_memdesc *memdesc)
kgsl_paged_unmap_kernel(memdesc);
WARN_ON(memdesc->hostptr);
+ if (memdesc->priv & KGSL_MEMDESC_MAPPED)
+ return;
+
atomic_long_sub(memdesc->size, &kgsl_driver.stats.page_alloc);
for (i = 0; i < memdesc->page_count; i++)
@@ -1292,7 +1301,12 @@ static void kgsl_free_secure_system_pages(struct kgsl_memdesc *memdesc)
{
int i;
struct scatterlist *sg;
- int ret = kgsl_unlock_sgt(memdesc->sgt);
+ int ret;
+
+ if (memdesc->priv & KGSL_MEMDESC_MAPPED)
+ return;
+
+ ret = kgsl_unlock_sgt(memdesc->sgt);
if (ret) {
/*
@@ -1322,8 +1336,12 @@ static void kgsl_free_secure_system_pages(struct kgsl_memdesc *memdesc)
static void kgsl_free_secure_pages(struct kgsl_memdesc *memdesc)
{
- int ret = kgsl_unlock_sgt(memdesc->sgt);
+ int ret;
+
+ if (memdesc->priv & KGSL_MEMDESC_MAPPED)
+ return;
+
+ ret = kgsl_unlock_sgt(memdesc->sgt);
if (ret) {
/*
* Unlock of the secure buffer failed. This buffer will
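All of the kgsl_sharedmem.c hunks above install the same guard: each free routine (contiguous, paged, system pages, and the two secure variants) returns early when KGSL_MEMDESC_MAPPED is set, deliberately leaking the allocation rather than returning pages the GPU may still reach. A compilable sketch of that guard, using a simplified stand-in struct rather than the driver's kgsl_memdesc:

#include <stdlib.h>

#define MEMDESC_MAPPED (1u << 0)	/* stand-in for KGSL_MEMDESC_MAPPED */

struct memdesc {
	unsigned int priv;		/* flag bits */
	void *pages;			/* backing allocation */
};

static void memdesc_free(struct memdesc *md)
{
	/*
	 * An earlier unmap failed, so the GPU may still hold a
	 * translation for these pages. Leaking them is the safe
	 * failure mode; freeing would risk a use-after-free.
	 */
	if (md->priv & MEMDESC_MAPPED)
		return;

	free(md->pages);
	md->pages = NULL;
}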
diff --git a/kgsl_vbo.c b/kgsl_vbo.c
index dd4129f..c7ef7d1 100644
--- a/kgsl_vbo.c
+++ b/kgsl_vbo.c
@@ -101,14 +101,15 @@ static void kgsl_memdesc_remove_range(struct kgsl_mem_entry *target,
* the entire range between start and last in this case.
*/
if (!entry || range->entry->id == entry->id) {
+ if (kgsl_mmu_unmap_range(memdesc->pagetable,
+ memdesc, range->range.start, bind_range_len(range)))
+ continue;
+
interval_tree_remove(node, &memdesc->ranges);
trace_kgsl_mem_remove_bind_range(target,
range->range.start, range->entry,
bind_range_len(range));
- kgsl_mmu_unmap_range(memdesc->pagetable,
- memdesc, range->range.start, bind_range_len(range));
-
if (!(memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO))
kgsl_mmu_map_zero_page_to_range(memdesc->pagetable,
memdesc, range->range.start, bind_range_len(range));
@@ -128,6 +129,7 @@ static int kgsl_memdesc_add_range(struct kgsl_mem_entry *target,
struct kgsl_memdesc *memdesc = &target->memdesc;
struct kgsl_memdesc_bind_range *range =
bind_range_create(start, last, entry);
+ int ret = 0;
if (IS_ERR(range))
return PTR_ERR(range);
@@ -139,9 +141,12 @@ static int kgsl_memdesc_add_range(struct kgsl_mem_entry *target,
* in one call. Otherwise we have to figure out what ranges to unmap
* while walking the interval tree.
*/
- if (!(memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO))
- kgsl_mmu_unmap_range(memdesc->pagetable, memdesc, start,
+ if (!(memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO)) {
+ ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc, start,
last - start + 1);
+ if (ret)
+ goto error;
+ }
next = interval_tree_iter_first(&memdesc->ranges, start, last);
@@ -160,10 +165,15 @@ static int kgsl_memdesc_add_range(struct kgsl_mem_entry *target,
if (start <= cur->range.start) {
if (last >= cur->range.last) {
/* Unmap the entire cur range */
- if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO)
- kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
+ if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO) {
+ ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
cur->range.start,
cur->range.last - cur->range.start + 1);
+ if (ret) {
+ interval_tree_insert(node, &memdesc->ranges);
+ goto error;
+ }
+ }
kgsl_mem_entry_put(cur->entry);
kfree(cur);
@@ -171,10 +181,15 @@ static int kgsl_memdesc_add_range(struct kgsl_mem_entry *target,
}
/* Unmap the range overlapping cur */
- if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO)
- kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
+ if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO) {
+ ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
cur->range.start,
last - cur->range.start + 1);
+ if (ret) {
+ interval_tree_insert(node, &memdesc->ranges);
+ goto error;
+ }
+ }
/* Adjust the start of the mapping */
cur->range.start = last + 1;
@@ -205,10 +220,15 @@ static int kgsl_memdesc_add_range(struct kgsl_mem_entry *target,
}
/* Unmap the range overlapping cur */
- if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO)
- kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
+ if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO) {
+ ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
start,
min_t(u64, cur->range.last, last) - start + 1);
+ if (ret) {
+ interval_tree_insert(node, &memdesc->ranges);
+ goto error;
+ }
+ }
cur->range.last = start - 1;
interval_tree_insert(node, &memdesc->ranges);
@@ -227,19 +247,26 @@ static int kgsl_memdesc_add_range(struct kgsl_mem_entry *target,
return kgsl_mmu_map_child(memdesc->pagetable, memdesc, start,
&entry->memdesc, offset, last - start + 1);
+
+error:
+ kgsl_mem_entry_put(range->entry);
+ kfree(range);
+ mutex_unlock(&memdesc->ranges_lock);
+ return ret;
}
static void kgsl_sharedmem_vbo_put_gpuaddr(struct kgsl_memdesc *memdesc)
{
struct interval_tree_node *node, *next;
struct kgsl_memdesc_bind_range *range;
+ int ret = 0;
/*
* If the VBO maps the zero range then we can unmap the entire
* pagetable region in one call.
*/
if (!(memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO))
- kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
+ ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
0, memdesc->size);
/*
@@ -259,14 +286,21 @@ static void kgsl_sharedmem_vbo_put_gpuaddr(struct kgsl_memdesc *memdesc)
/* Unmap this range */
if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO)
- kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
+ ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
range->range.start,
range->range.last - range->range.start + 1);
+ /* If unmap failed, mark the child memdesc as still mapped */
+ if (ret)
+ range->entry->memdesc.priv |= KGSL_MEMDESC_MAPPED;
+
kgsl_mem_entry_put(range->entry);
kfree(range);
}
+ if (ret)
+ return;
+
/* Put back the GPU address */
kgsl_mmu_put_gpuaddr(memdesc->pagetable, memdesc);
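The kgsl_vbo.c changes apply the same rule to bind ranges: kgsl_memdesc_remove_range() now unmaps before touching the interval tree and skips the removal (and the entry put) when the unmap fails; kgsl_memdesc_add_range() unwinds and re-inserts the node on failure; and kgsl_sharedmem_vbo_put_gpuaddr() flags any child whose range could not be unmapped with KGSL_MEMDESC_MAPPED, then declines to call kgsl_mmu_put_gpuaddr() so the VBO's GPU address range is not recycled while stale mappings may exist. A sketch of that teardown intent, with illustrative names and stubbed helpers rather than the driver's code:

struct memdesc {
	unsigned int priv;
};

#define MEMDESC_MAPPED (1u << 0)	/* stand-in flag */

/* Stub for the per-range SMMU unmap; in the driver this can fail. */
static int unmap_range(struct memdesc *child)
{
	(void)child;
	return 0;
}

/* Stub for recycling the VBO's GPU virtual address range. */
static void put_gpuaddr(struct memdesc *vbo)
{
	(void)vbo;
}

static void vbo_put_gpuaddr(struct memdesc *vbo,
			    struct memdesc **children, int n)
{
	int i, failed = 0;

	for (i = 0; i < n; i++) {
		if (unmap_range(children[i])) {
			/* GPU may still see this range: keep its pages. */
			children[i]->priv |= MEMDESC_MAPPED;
			failed = 1;
		}
	}

	/* Only hand the GPU address back once every range is gone. */
	if (!failed)
		put_gpuaddr(vbo);
}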