author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2024-03-03 01:44:12 +0000
committer  Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2024-03-03 01:44:12 +0000
commit     2ee81ac2b660198580f5e9194761914e06665753 (patch)
tree       dca8a50e582fca90d6d7a986578c0d3c56323a85
parent     f48579d406bbd5fbc2b29f3107c2f40217a95aae (diff)
parent     902041789d20bb94f58526ffa1ae2aa2ddb29ae1 (diff)
Merge cherrypicks of ['partner-android-review.googlesource.com/2755567', 'partner-android-review.googlesource.com/2755569'] into android13-msm-pixelwatch-5.15-24Q1-release (android-wear-13.0.0_r0.14, android-msm-eos-5.15-tm-wear-kr3-pixel-watch)
Change-Id: I4ff7aa83b6d58ccb9834b109e071e3c9ad2d62f2
Signed-off-by: Coastguard Worker <android-build-coastguard-worker@google.com>
-rw-r--r--  adreno_gen7.c     |  8
-rw-r--r--  gen7_reg.h        |  1
-rw-r--r--  kgsl_mmu.c        |  5
-rw-r--r--  kgsl_sharedmem.c  | 22
-rw-r--r--  kgsl_vbo.c        | 60
5 files changed, 78 insertions(+), 18 deletions(-)
diff --git a/adreno_gen7.c b/adreno_gen7.c
index 05dad87..14cba26 100644
--- a/adreno_gen7.c
+++ b/adreno_gen7.c
@@ -47,6 +47,7 @@ static const u32 gen7_ifpc_pwrup_reglist[] = {
GEN7_SP_NC_MODE_CNTL,
GEN7_CP_DBG_ECO_CNTL,
GEN7_CP_PROTECT_CNTL,
+ GEN7_CP_LPAC_PROTECT_CNTL,
GEN7_CP_PROTECT_REG,
GEN7_CP_PROTECT_REG+1,
GEN7_CP_PROTECT_REG+2,
@@ -282,6 +283,7 @@ void gen7_get_gpu_feature_info(struct adreno_device *adreno_dev)
adreno_dev->feature_fuse = feature_fuse;
}
+#define GEN7_PROTECT_DEFAULT (BIT(0) | BIT(1) | BIT(3))
static void gen7_protect_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -294,8 +296,10 @@ static void gen7_protect_init(struct adreno_device *adreno_dev)
* protect violation and select the last span to protect from the start
* address all the way to the end of the register address space
*/
- kgsl_regwrite(device, GEN7_CP_PROTECT_CNTL,
- BIT(0) | BIT(1) | BIT(3));
+ kgsl_regwrite(device, GEN7_CP_PROTECT_CNTL, GEN7_PROTECT_DEFAULT);
+
+ if (adreno_dev->lpac_enabled)
+ kgsl_regwrite(device, GEN7_CP_LPAC_PROTECT_CNTL, GEN7_PROTECT_DEFAULT);
/* Program each register defined by the core definition */
for (i = 0; regs[i].reg; i++) {
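The adreno_gen7.c change mirrors the existing CP protect setup onto the LPAC (low-priority asynchronous compute) pipe: the new register joins the IFPC power-up restore list, and gen7_protect_init() programs it with the same factored-out default when LPAC is in use. A minimal standalone sketch of that pattern follows; kgsl_regwrite() is replaced by a hypothetical stub, and the non-LPAC register offset is illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Factored-out default from the patch: per the in-tree comment, halt
 * the CP on a protect violation and let the last span cover the rest
 * of the register address space. */
#define GEN7_PROTECT_DEFAULT (BIT(0) | BIT(1) | BIT(3))

#define GEN7_CP_PROTECT_CNTL      0x84f /* illustrative offset */
#define GEN7_CP_LPAC_PROTECT_CNTL 0xb09 /* added in gen7_reg.h below */

/* Stand-in for kgsl_regwrite(device, reg, value). */
static void regwrite(uint32_t reg, uint32_t value)
{
	printf("reg 0x%03x <- 0x%x\n", reg, value);
}

static void protect_init(bool lpac_enabled)
{
	regwrite(GEN7_CP_PROTECT_CNTL, GEN7_PROTECT_DEFAULT);

	/* New in this patch: the LPAC pipe has its own protect control
	 * register, so apply the same policy there when LPAC is enabled. */
	if (lpac_enabled)
		regwrite(GEN7_CP_LPAC_PROTECT_CNTL, GEN7_PROTECT_DEFAULT);
}

int main(void)
{
	protect_init(true);
	return 0;
}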
diff --git a/gen7_reg.h b/gen7_reg.h
index 83f06cf..6e1021a 100644
--- a/gen7_reg.h
+++ b/gen7_reg.h
@@ -139,6 +139,7 @@
/* LPAC registers */
#define GEN7_CP_LPAC_RB_RPTR 0xb06
#define GEN7_CP_LPAC_RB_WPTR 0xb07
+#define GEN7_CP_LPAC_PROTECT_CNTL 0xb09
#define GEN7_CP_LPAC_DRAW_STATE_ADDR 0xb0a
#define GEN7_CP_LPAC_DRAW_STATE_DATA 0xb0b
#define GEN7_CP_LPAC_ROQ_DBG_ADDR 0xb0c
diff --git a/kgsl_mmu.c b/kgsl_mmu.c
index 8992d72..b55714a 100644
--- a/kgsl_mmu.c
+++ b/kgsl_mmu.c
@@ -457,6 +457,8 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
size = kgsl_memdesc_footprint(memdesc);
ret = pagetable->pt_ops->mmu_unmap(pagetable, memdesc);
+ if (ret)
+ return ret;
atomic_dec(&pagetable->stats.entries);
atomic_long_sub(size, &pagetable->stats.mapped);
@@ -486,7 +488,8 @@ kgsl_mmu_unmap_range(struct kgsl_pagetable *pagetable,
ret = pagetable->pt_ops->mmu_unmap_range(pagetable, memdesc,
offset, length);
- atomic_long_sub(length, &pagetable->stats.mapped);
+ if (!ret)
+ atomic_long_sub(length, &pagetable->stats.mapped);
}
return ret;
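Both kgsl_mmu.c hunks apply the same rule: only adjust the pagetable accounting once the low-level unmap has actually succeeded, and surface the error to the caller instead of swallowing it. A standalone sketch of the pattern, with the KGSL types and pagetable ops reduced to illustrative stand-ins:

#include <stdatomic.h>

/* Cut-down stand-in for the pagetable stats; names are illustrative. */
struct pt_stats {
	atomic_long entries;
	atomic_long mapped;
};

/* Pretend pt_ops->mmu_unmap(): returns 0 or a negative errno. */
static int hw_unmap(unsigned long size)
{
	(void)size;
	return 0;
}

static int mmu_unmap(struct pt_stats *stats, unsigned long size)
{
	int ret = hw_unmap(size);

	/* Before the patch the counters were decremented unconditionally,
	 * so a failed unmap still shrank the bookkeeping. Now the stats
	 * are only touched on success. */
	if (ret)
		return ret;

	atomic_fetch_sub(&stats->entries, 1);
	atomic_fetch_sub(&stats->mapped, (long)size);
	return 0;
}

int main(void)
{
	struct pt_stats stats = { 4, 4096 };

	return mmu_unmap(&stats, 4096);
}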
diff --git a/kgsl_sharedmem.c b/kgsl_sharedmem.c
index 5ee7e07..979a129 100644
--- a/kgsl_sharedmem.c
+++ b/kgsl_sharedmem.c
@@ -986,6 +986,9 @@ static void kgsl_contiguous_free(struct kgsl_memdesc *memdesc)
if (!memdesc->hostptr)
return;
+ if (memdesc->priv & KGSL_MEMDESC_MAPPED)
+ return;
+
atomic_long_sub(memdesc->size, &kgsl_driver.stats.coherent);
_kgsl_contiguous_free(memdesc);
@@ -1198,6 +1201,9 @@ static void kgsl_free_pages(struct kgsl_memdesc *memdesc)
kgsl_paged_unmap_kernel(memdesc);
WARN_ON(memdesc->hostptr);
+ if (memdesc->priv & KGSL_MEMDESC_MAPPED)
+ return;
+
atomic_long_sub(memdesc->size, &kgsl_driver.stats.page_alloc);
_kgsl_free_pages(memdesc, memdesc->page_count);
@@ -1216,6 +1222,9 @@ static void kgsl_free_system_pages(struct kgsl_memdesc *memdesc)
kgsl_paged_unmap_kernel(memdesc);
WARN_ON(memdesc->hostptr);
+ if (memdesc->priv & KGSL_MEMDESC_MAPPED)
+ return;
+
atomic_long_sub(memdesc->size, &kgsl_driver.stats.page_alloc);
for (i = 0; i < memdesc->page_count; i++)
@@ -1292,7 +1301,12 @@ static void kgsl_free_secure_system_pages(struct kgsl_memdesc *memdesc)
{
int i;
struct scatterlist *sg;
- int ret = kgsl_unlock_sgt(memdesc->sgt);
+ int ret;
+
+ if (memdesc->priv & KGSL_MEMDESC_MAPPED)
+ return;
+
+ ret = kgsl_unlock_sgt(memdesc->sgt);
if (ret) {
/*
@@ -1322,8 +1336,12 @@ static void kgsl_free_secure_system_pages(struct kgsl_memdesc *memdesc)
static void kgsl_free_secure_pages(struct kgsl_memdesc *memdesc)
{
- int ret = kgsl_unlock_sgt(memdesc->sgt);
+ int ret;
+
+ if (memdesc->priv & KGSL_MEMDESC_MAPPED)
+ return;
+ ret = kgsl_unlock_sgt(memdesc->sgt);
if (ret) {
/*
* Unlock of the secure buffer failed. This buffer will
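Every free path in kgsl_sharedmem.c gains the same early-out: if KGSL_MEMDESC_MAPPED is set on the descriptor, the pages are deliberately leaked rather than returned to the allocator, since freeing memory the GPU can still reach would be a use-after-free. The flag is set by the kgsl_vbo.c teardown path below when an unmap fails. A minimal sketch of the guard, with hypothetical stand-in types:

#include <stdlib.h>

/* Illustrative flag bit; the real KGSL_MEMDESC_MAPPED lives in the
 * KGSL headers. */
#define KGSL_MEMDESC_MAPPED (1u << 0)

struct memdesc {
	unsigned int priv;
	void *pages;
};

static void free_pages_guarded(struct memdesc *md)
{
	/* Same guard the patch adds to kgsl_contiguous_free(),
	 * kgsl_free_pages(), kgsl_free_system_pages() and the two
	 * secure variants: if the buffer may still be mapped in a
	 * pagetable, leaking it is safer than freeing it. */
	if (md->priv & KGSL_MEMDESC_MAPPED)
		return;

	free(md->pages);
	md->pages = NULL;
}

int main(void)
{
	struct memdesc md = { .priv = KGSL_MEMDESC_MAPPED,
			      .pages = malloc(64) };

	free_pages_guarded(&md); /* no-op: md is still marked mapped */
	md.priv = 0;
	free_pages_guarded(&md); /* now the pages are released */
	return 0;
}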
diff --git a/kgsl_vbo.c b/kgsl_vbo.c
index dd4129f..c7ef7d1 100644
--- a/kgsl_vbo.c
+++ b/kgsl_vbo.c
@@ -101,14 +101,15 @@ static void kgsl_memdesc_remove_range(struct kgsl_mem_entry *target,
* the entire range between start and last in this case.
*/
if (!entry || range->entry->id == entry->id) {
+ if (kgsl_mmu_unmap_range(memdesc->pagetable,
+ memdesc, range->range.start, bind_range_len(range)))
+ continue;
+
interval_tree_remove(node, &memdesc->ranges);
trace_kgsl_mem_remove_bind_range(target,
range->range.start, range->entry,
bind_range_len(range));
- kgsl_mmu_unmap_range(memdesc->pagetable,
- memdesc, range->range.start, bind_range_len(range));
-
if (!(memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO))
kgsl_mmu_map_zero_page_to_range(memdesc->pagetable,
memdesc, range->range.start, bind_range_len(range));
@@ -128,6 +129,7 @@ static int kgsl_memdesc_add_range(struct kgsl_mem_entry *target,
struct kgsl_memdesc *memdesc = &target->memdesc;
struct kgsl_memdesc_bind_range *range =
bind_range_create(start, last, entry);
+ int ret = 0;
if (IS_ERR(range))
return PTR_ERR(range);
@@ -139,9 +141,12 @@ static int kgsl_memdesc_add_range(struct kgsl_mem_entry *target,
* in one call. Otherwise we have to figure out what ranges to unmap
* while walking the interval tree.
*/
- if (!(memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO))
- kgsl_mmu_unmap_range(memdesc->pagetable, memdesc, start,
+ if (!(memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO)) {
+ ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc, start,
last - start + 1);
+ if (ret)
+ goto error;
+ }
next = interval_tree_iter_first(&memdesc->ranges, start, last);
@@ -160,10 +165,15 @@ static int kgsl_memdesc_add_range(struct kgsl_mem_entry *target,
if (start <= cur->range.start) {
if (last >= cur->range.last) {
/* Unmap the entire cur range */
- if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO)
- kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
+ if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO) {
+ ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
cur->range.start,
cur->range.last - cur->range.start + 1);
+ if (ret) {
+ interval_tree_insert(node, &memdesc->ranges);
+ goto error;
+ }
+ }
kgsl_mem_entry_put(cur->entry);
kfree(cur);
@@ -171,10 +181,15 @@ static int kgsl_memdesc_add_range(struct kgsl_mem_entry *target,
}
/* Unmap the range overlapping cur */
- if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO)
- kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
+ if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO) {
+ ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
cur->range.start,
last - cur->range.start + 1);
+ if (ret) {
+ interval_tree_insert(node, &memdesc->ranges);
+ goto error;
+ }
+ }
/* Adjust the start of the mapping */
cur->range.start = last + 1;
@@ -205,10 +220,15 @@ static int kgsl_memdesc_add_range(struct kgsl_mem_entry *target,
}
/* Unmap the range overlapping cur */
- if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO)
- kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
+ if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO) {
+ ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
start,
min_t(u64, cur->range.last, last) - start + 1);
+ if (ret) {
+ interval_tree_insert(node, &memdesc->ranges);
+ goto error;
+ }
+ }
cur->range.last = start - 1;
interval_tree_insert(node, &memdesc->ranges);
@@ -227,19 +247,26 @@ static int kgsl_memdesc_add_range(struct kgsl_mem_entry *target,
return kgsl_mmu_map_child(memdesc->pagetable, memdesc, start,
&entry->memdesc, offset, last - start + 1);
+
+error:
+ kgsl_mem_entry_put(range->entry);
+ kfree(range);
+ mutex_unlock(&memdesc->ranges_lock);
+ return ret;
}
static void kgsl_sharedmem_vbo_put_gpuaddr(struct kgsl_memdesc *memdesc)
{
struct interval_tree_node *node, *next;
struct kgsl_memdesc_bind_range *range;
+ int ret = 0;
/*
* If the VBO maps the zero range then we can unmap the entire
* pagetable region in one call.
*/
if (!(memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO))
- kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
+ ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
0, memdesc->size);
/*
@@ -259,14 +286,21 @@ static void kgsl_sharedmem_vbo_put_gpuaddr(struct kgsl_memdesc *memdesc)
/* Unmap this range */
if (memdesc->flags & KGSL_MEMFLAGS_VBO_NO_MAP_ZERO)
- kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
+ ret = kgsl_mmu_unmap_range(memdesc->pagetable, memdesc,
range->range.start,
range->range.last - range->range.start + 1);
+ /* If unmap failed, mark the child memdesc as still mapped */
+ if (ret)
+ range->entry->memdesc.priv |= KGSL_MEMDESC_MAPPED;
+
kgsl_mem_entry_put(range->entry);
kfree(range);
}
+ if (ret)
+ return;
+
/* Put back the GPU address */
kgsl_mmu_put_gpuaddr(memdesc->pagetable, memdesc);
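Taken together, the kgsl_vbo.c changes make every kgsl_mmu_unmap_range() failure observable: range add/remove now bails out (re-inserting the interval-tree node where needed), and the final teardown tags any child whose unmap failed with KGSL_MEMDESC_MAPPED so the guarded free paths above keep its pages alive, while the parent's GPU address is not put back. A simplified sketch of the teardown intent; types and helpers are stand-ins, and unlike the patch (whose ret reflects only the last range) this sketch accumulates failures across the loop:

#include <stdbool.h>
#include <stddef.h>

#define KGSL_MEMDESC_MAPPED (1u << 0)

struct memdesc {
	unsigned int priv;
};

/* Stand-in for kgsl_mmu_unmap_range(): 0 on success, negative errno
 * on failure. */
static int unmap_range(struct memdesc *child)
{
	(void)child;
	return 0;
}

/* Mirrors the intent of kgsl_sharedmem_vbo_put_gpuaddr(): unmap every
 * bound child range; on failure, mark that child still-mapped so its
 * free path refuses to release the pages. Report success only when
 * everything unmapped, so the caller knows whether it is safe to put
 * the parent's GPU address. */
static bool teardown_ranges(struct memdesc **children, size_t n)
{
	bool ok = true;
	size_t i;

	for (i = 0; i < n; i++) {
		if (unmap_range(children[i])) {
			children[i]->priv |= KGSL_MEMDESC_MAPPED;
			ok = false;
		}
	}
	return ok;
}

int main(void)
{
	struct memdesc a = { 0 }, b = { 0 };
	struct memdesc *kids[] = { &a, &b };

	return teardown_ranges(kids, 2) ? 0 : 1;
}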