summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCarlos Llamas <cmllamas@google.com>2023-12-05 03:08:33 +0000
committerRick Yiu <rickyiu@google.com>2024-01-17 03:02:06 +0000
commit2d287d1f298896624b62cee564c44a78ded8e769 (patch)
tree2ba80ce3622497cd0ffd6afc38e1d8674dece8d7
parent15f48d3c93bd1ff563be40783caffae448208133 (diff)
downloadgs-2d287d1f298896624b62cee564c44a78ded8e769.tar.gz
ANDROID: binder: fix KMI-break due to alloc->lock
Wrap 'struct binder_proc' inside 'struct binder_proc_wrap' to add the
alloc->lock equivalent without breaking the KMI. Also, add convenient
apis to access/modify this new spinlock.

Without this patch, the following KMI issues show up:

  type 'struct binder_proc' changed
    byte size changed from 616 to 576

  type 'struct binder_alloc' changed
    byte size changed from 152 to 112
    member 'spinlock_t lock' was added
    member 'struct mutex mutex' was removed

Bug: 254650075
Bug: 320576997
Change-Id: Ic31dc39fb82800a3e47be10a7873cd210f7b60be
Signed-off-by: Carlos Llamas <cmllamas@google.com>
-rw-r--r--drivers/android/binder.c11
-rw-r--r--drivers/android/binder_alloc.c40
-rw-r--r--drivers/android/binder_alloc.h21
-rw-r--r--drivers/android/binder_internal.h60
4 files changed, 90 insertions, 42 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 840ceec57c49..ed45468c0794 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -5002,6 +5002,7 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
static void binder_free_proc(struct binder_proc *proc)
{
+ struct binder_proc_wrap *proc_wrap;
struct binder_device *device;
BUG_ON(!list_empty(&proc->todo));
@@ -5018,7 +5019,8 @@ static void binder_free_proc(struct binder_proc *proc)
put_task_struct(proc->tsk);
put_cred(proc->cred);
binder_stats_deleted(BINDER_STAT_PROC);
- kfree(proc);
+ proc_wrap = binder_proc_wrap_entry(proc);
+ kfree(proc_wrap);
}
static void binder_free_thread(struct binder_thread *thread)
@@ -5701,6 +5703,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
static int binder_open(struct inode *nodp, struct file *filp)
{
+ struct binder_proc_wrap *proc_wrap;
struct binder_proc *proc, *itr;
struct binder_device *binder_dev;
struct binderfs_info *info;
@@ -5710,9 +5713,11 @@ static int binder_open(struct inode *nodp, struct file *filp)
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
current->group_leader->pid, current->pid);
- proc = kzalloc(sizeof(*proc), GFP_KERNEL);
- if (proc == NULL)
+ proc_wrap = kzalloc(sizeof(*proc_wrap), GFP_KERNEL);
+ if (proc_wrap == NULL)
return -ENOMEM;
+ proc = &proc_wrap->proc;
+
spin_lock_init(&proc->inner_lock);
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 7d1e03eb6dd0..cbae312838fc 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -23,7 +23,7 @@
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
-#include "binder_alloc.h"
+#include "binder_internal.h"
#include "binder_trace.h"
#include <trace/hooks/binder.h>
@@ -170,9 +170,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
{
struct binder_buffer *buffer;
- spin_lock(&alloc->lock);
+ binder_alloc_lock(alloc);
buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
return buffer;
}
@@ -616,10 +616,10 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
if (!next)
return ERR_PTR(-ENOMEM);
- spin_lock(&alloc->lock);
+ binder_alloc_lock(alloc);
buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
if (IS_ERR(buffer)) {
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
goto out;
}
@@ -627,7 +627,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
buffer->offsets_size = offsets_size;
buffer->extra_buffers_size = extra_buffers_size;
buffer->pid = current->tgid;
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
ret = binder_install_buffer_pages(alloc, buffer, size);
if (ret) {
@@ -816,9 +816,9 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
binder_alloc_clear_buf(alloc, buffer);
buffer->clear_on_free = false;
}
- spin_lock(&alloc->lock);
+ binder_alloc_lock(alloc);
binder_free_buf_locked(alloc, buffer);
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
}
/**
@@ -908,7 +908,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
struct binder_buffer *buffer;
buffers = 0;
- spin_lock(&alloc->lock);
+ binder_alloc_lock(alloc);
BUG_ON(alloc->vma);
while ((n = rb_first(&alloc->allocated_buffers))) {
@@ -958,7 +958,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
}
kfree(alloc->pages);
}
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
if (alloc->vma_vm_mm)
mmdrop(alloc->vma_vm_mm);
@@ -981,7 +981,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
struct binder_buffer *buffer;
struct rb_node *n;
- spin_lock(&alloc->lock);
+ binder_alloc_lock(alloc);
for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n",
@@ -991,7 +991,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
buffer->extra_buffers_size,
buffer->transaction ? "active" : "delivered");
}
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
}
/**
@@ -1008,7 +1008,7 @@ void binder_alloc_print_pages(struct seq_file *m,
int lru = 0;
int free = 0;
- spin_lock(&alloc->lock);
+ binder_alloc_lock(alloc);
/*
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
@@ -1024,7 +1024,7 @@ void binder_alloc_print_pages(struct seq_file *m,
lru++;
}
}
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}
@@ -1040,10 +1040,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
struct rb_node *n;
int count = 0;
- spin_lock(&alloc->lock);
+ binder_alloc_lock(alloc);
for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
count++;
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
return count;
}
@@ -1088,7 +1088,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
goto err_mmget;
if (!mmap_read_trylock(mm))
goto err_mmap_read_lock_failed;
- if (!spin_trylock(&alloc->lock))
+ if (!binder_alloc_trylock(alloc))
goto err_get_alloc_lock_failed;
if (!page->page_ptr)
goto err_page_already_freed;
@@ -1108,7 +1108,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_kernel_end(alloc, index);
list_lru_isolate(lru, item);
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
spin_unlock(lock);
if (vma) {
@@ -1128,7 +1128,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
err_invalid_vma:
err_page_already_freed:
- spin_unlock(&alloc->lock);
+ binder_alloc_unlock(alloc);
err_get_alloc_lock_failed:
mmap_read_unlock(mm);
err_mmap_read_lock_failed:
@@ -1166,7 +1166,7 @@ static struct shrinker binder_shrinker = {
void binder_alloc_init(struct binder_alloc *alloc)
{
alloc->pid = current->group_leader->pid;
- spin_lock_init(&alloc->lock);
+ binder_alloc_lock_init(alloc);
INIT_LIST_HEAD(&alloc->buffers);
}
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 03eb6b9afc49..73d05f12c057 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -9,7 +9,7 @@
#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
-#include <linux/spinlock.h>
+#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
@@ -98,7 +98,7 @@ struct binder_lru_page {
* struct binder_buffer objects used to track the user buffers
*/
struct binder_alloc {
- spinlock_t lock;
+ struct mutex mutex;
struct vm_area_struct *vma;
struct mm_struct *vma_vm_mm;
void __user *buffer;
@@ -144,23 +144,6 @@ void binder_alloc_print_allocated(struct seq_file *m,
void binder_alloc_print_pages(struct seq_file *m,
struct binder_alloc *alloc);
-/**
- * binder_alloc_get_free_async_space() - get free space available for async
- * @alloc: binder_alloc for this proc
- *
- * Return: the bytes remaining in the address-space for async transactions
- */
-static inline size_t
-binder_alloc_get_free_async_space(struct binder_alloc *alloc)
-{
- size_t free_async_space;
-
- spin_lock(&alloc->lock);
- free_async_space = alloc->free_async_space;
- spin_unlock(&alloc->lock);
- return free_async_space;
-}
-
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
struct binder_buffer *buffer,
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 5bbd87dd6c25..0ff0a8a85dc3 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -461,6 +461,66 @@ struct binder_proc {
bool oneway_spam_detection_enabled;
};
+struct binder_proc_wrap {
+ struct binder_proc proc;
+ spinlock_t lock;
+};
+
+static inline struct binder_proc *
+binder_proc_entry(struct binder_alloc *alloc)
+{
+ return container_of(alloc, struct binder_proc, alloc);
+}
+
+static inline struct binder_proc_wrap *
+binder_proc_wrap_entry(struct binder_proc *proc)
+{
+ return container_of(proc, struct binder_proc_wrap, proc);
+}
+
+static inline struct binder_proc_wrap *
+binder_alloc_to_proc_wrap(struct binder_alloc *alloc)
+{
+ return binder_proc_wrap_entry(binder_proc_entry(alloc));
+}
+
+static inline void binder_alloc_lock_init(struct binder_alloc *alloc)
+{
+ spin_lock_init(&binder_alloc_to_proc_wrap(alloc)->lock);
+}
+
+static inline void binder_alloc_lock(struct binder_alloc *alloc)
+{
+ spin_lock(&binder_alloc_to_proc_wrap(alloc)->lock);
+}
+
+static inline void binder_alloc_unlock(struct binder_alloc *alloc)
+{
+ spin_unlock(&binder_alloc_to_proc_wrap(alloc)->lock);
+}
+
+static inline int binder_alloc_trylock(struct binder_alloc *alloc)
+{
+ return spin_trylock(&binder_alloc_to_proc_wrap(alloc)->lock);
+}
+
+/**
+ * binder_alloc_get_free_async_space() - get free space available for async
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: the bytes remaining in the address-space for async transactions
+ */
+static inline size_t
+binder_alloc_get_free_async_space(struct binder_alloc *alloc)
+{
+ size_t free_async_space;
+
+ binder_alloc_lock(alloc);
+ free_async_space = alloc->free_async_space;
+ binder_alloc_unlock(alloc);
+ return free_async_space;
+}
+
/**
* struct binder_thread - binder thread bookkeeping
* @proc: binder process for this thread