author    Jack Diver <diverj@google.com>  2023-07-10 14:37:38 +0000
committer Jack Diver <diverj@google.com>  2023-08-21 16:50:24 +0000
commit    d5304c2fbb0c4fbc9c4ded32d032d00cbc1cd39c (patch)
tree      b7335c33bdabab686d226bb764081bae881784ee
parent    526005ab91b3ba0be389883a3e771382721da4a8 (diff)
mali_pixel: mgm: Compensate for group migration
Pages in the SLC group were often 'migrated' there from the reserved
group. This leads to unbalanced accounting: at alloc time the reserved
group's counter is incremented, but at free time the SLC group's counter
is decremented. Add custom logic to detect this case, with a plan to
remove it soon, once memory groups are decoupled from SLC partitions
(b/290354607).

Bug: 289501175
Signed-off-by: Jack Diver <diverj@google.com>
(cherry picked from https://partner-android-review.googlesource.com/q/commit:7bb27d3d29f850315d0b7ee103fbdfe75e9403da)
Merged-In: Ia878a05c4d8dd7db7969f7089c6b93bf90c17f8e
Change-Id: Ia878a05c4d8dd7db7969f7089c6b93bf90c17f8e
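For illustration, a minimal standalone sketch of the accounting imbalance this
patch compensates for. The enum values, the plain long counters, and the
update_size() signature below are simplified stand-ins invented for this
sketch; the driver itself uses atomic_t counters and the mgm_dev structures
shown in the diff:

#include <stdio.h>

/* Hypothetical group IDs standing in for the driver's reserved and SLC
 * groups (not the real MGM_* constants).
 */
enum { RESERVED_GROUP, SLC_GROUP, NR_GROUPS };

static long size[NR_GROUPS];	/* simplified per-group page counters */

static void update_size(int group_id, int alloc)
{
	if (alloc) {
		size[group_id]++;
	} else if (--size[group_id] < 0) {
		/* Underflow: the page was charged to the reserved group at
		 * alloc time and later migrated to the SLC group. Undo the
		 * decrement and charge the reserved group instead, mirroring
		 * the compensation added in this patch.
		 */
		size[group_id]++;
		update_size(RESERVED_GROUP, alloc);
	}
}

int main(void)
{
	update_size(RESERVED_GROUP, 1);	/* alloc accounted to reserved group */
	/* ... the page is migrated into the SLC group ... */
	update_size(SLC_GROUP, 0);	/* free accounted to the SLC group */

	/* Without the compensation this would print reserved=1 slc=-1;
	 * with it, both counters balance back to zero.
	 */
	printf("reserved=%ld slc=%ld\n", size[RESERVED_GROUP], size[SLC_GROUP]);
	return 0;
}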
 mali_pixel/memory_group_manager.c | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/mali_pixel/memory_group_manager.c b/mali_pixel/memory_group_manager.c
index ed5f04f..0618994 100644
--- a/mali_pixel/memory_group_manager.c
+++ b/mali_pixel/memory_group_manager.c
@@ -434,7 +434,26 @@ static void update_size(struct memory_group_manager_device *mgm_dev, int
 		atomic_inc(size);
 		atomic64_add(1 << order, &total_gpu_pages);
 	} else {
-		WARN_ON(atomic_dec_return(size) < 0);
+		if (atomic_dec_return(size) < 0) {
+			/* b/289501175
+			 * Pages are often 'migrated' to the SLC group, which needs special
+			 * accounting.
+			 *
+			 * TODO: Remove after SLC MGM decoupling b/290354607
+			 */
+			if (!WARN_ON(group_id != MGM_SLC_GROUP_ID)) {
+				/* Undo the dec, and instead decrement the reserved group counter.
+				 * This is still making the assumption that the migration came from
+				 * the reserved group. Currently this is always true, however it
+				 * might not be in future. It would be invasive and costly to track
+				 * where every page came from, so instead this will be fixed as part
+				 * of the b/290354607 effort.
+				 */
+				atomic_inc(size);
+				update_size(mgm_dev, MGM_RESERVED_GROUP_ID, order, alloc);
+				return;
+			}
+		}
 		atomic64_sub(1 << order, &total_gpu_pages);
 	}
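Design note: the fix reuses update_size() itself to route the decrement to
the reserved group rather than duplicating the accounting. The recursion is
bounded: the recursive call passes MGM_RESERVED_GROUP_ID, so if that counter
were to underflow as well, WARN_ON(group_id != MGM_SLC_GROUP_ID) fires and
the function falls through to atomic64_sub() without recursing further.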