author    Joel Fernandes <joelaf@google.com>  2017-08-01 15:49:20 -0700
committer Maggie White <maggiewhite@google.com>  2017-09-12 19:29:26 -0700
commit    7fb0fd2b8db0a0fe6eb0e80ff8faa5f46cd793bf
tree      2a04e401653956cedb10736f56576a619cb0a10d
parent    a427ab01b06a53721b7077f9a9ebf227db8c481e
binder: always allocate/map first BINDER_MIN_ALLOC pages
Certain use cases like camera are constantly allocating and freeing binder
buffers beyond the first 4k, resulting in mmap_sem contention. If we expand
the allocated range from 4k to something higher, we can reduce the
contention. Tests show that 6 pages are enough to cause very few
update_page_range operations and reduce contention.

Bug: 36727951
Change-Id: I28bc3fb9b33c764c257e28487712fce2a3c1078b
Reported-by: Tim Murray <timmurray@google.com>
Signed-off-by: Joel Fernandes <joelaf@google.com>

Pre-allocate 1 page instead of the 6 in the original patch, since the
pre-allocated page is used to prevent the first page from getting unpinned
after the buffer headers are removed, rather than to pin pages to speed up
larger transactions.

Change-Id: I7c3e4884a9538ecfd86601d31c5bcfd6611d37a4
Signed-off-by: Sherry Yang <sherryy@android.com>
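For illustration, a minimal user-space sketch of the clamping performed by the
new binder_update_page_range() wrapper: an allocate/free update that begins
inside the first BINDER_MIN_ALLOC bytes of the binder mapping has its start
pushed past that region, so the pre-allocated page stays mapped. The helper
name clamp_start() and the example addresses are hypothetical, not from the
kernel source.

#include <stdio.h>

#define PAGE_SIZE        4096UL
#define BINDER_MIN_ALLOC (1 * PAGE_SIZE)

/*
 * Mirror of the wrapper's check: an update that begins inside the
 * always-mapped first BINDER_MIN_ALLOC bytes is moved past them, so a
 * regular free can never unmap the pre-allocated page.
 */
static unsigned long clamp_start(unsigned long proc_buffer, unsigned long start)
{
	if (start - proc_buffer < BINDER_MIN_ALLOC)
		start = proc_buffer + BINDER_MIN_ALLOC;
	return start;
}

int main(void)
{
	unsigned long buffer = 0x700000000000UL;	/* hypothetical mmap base */

	/* An update starting at the first page is pushed to buffer + 4096. */
	printf("0x%lx\n", clamp_start(buffer, buffer));
	/* An update beyond BINDER_MIN_ALLOC is left unchanged. */
	printf("0x%lx\n", clamp_start(buffer, buffer + 3 * PAGE_SIZE));
	return 0;
}

With BINDER_MIN_ALLOC at one page, only the page holding the buffer headers is
kept permanently mapped; binder_mmap() maps it up front and regular
allocate/free updates never touch it.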
-rw-r--r--	drivers/staging/android/binder.c	25
1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index b92a38767aa0..34934841cfe8 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -42,6 +42,8 @@
 #include "binder.h"
 #include "binder_trace.h"
 
+#define BINDER_MIN_ALLOC (1 * PAGE_SIZE)
+
 static DEFINE_MUTEX(binder_main_lock);
 static DEFINE_MUTEX(binder_deferred_lock);
 static DEFINE_MUTEX(binder_mmap_lock);
@@ -632,9 +634,9 @@ static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
 	return NULL;
 }
 
-static int binder_update_page_range(struct binder_proc *proc, int allocate,
-				    void *start, void *end,
-				    struct vm_area_struct *vma)
+static int __binder_update_page_range(struct binder_proc *proc, int allocate,
+				      void *start, void *end,
+				      struct vm_area_struct *vma)
 {
 	void *page_addr;
 	unsigned long user_page_addr;
@@ -742,6 +744,20 @@ err_no_vma:
 	return -ENOMEM;
 }
 
+static int binder_update_page_range(struct binder_proc *proc, int allocate,
+				    void *start, void *end,
+				    struct vm_area_struct *vma)
+{
+	/*
+	 * For regular updates, move up start if needed since MIN_ALLOC pages
+	 * are always mapped
+	 */
+	if (start - proc->buffer < BINDER_MIN_ALLOC)
+		start = proc->buffer + BINDER_MIN_ALLOC;
+
+	return __binder_update_page_range(proc, allocate, start, end, vma);
+}
+
 static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 					      size_t data_size,
 					      size_t offsets_size,
@@ -3533,7 +3549,8 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	/* binder_update_page_range assumes preemption is disabled */
 	preempt_disable();
-	ret = binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma);
+	ret = __binder_update_page_range(proc, 1, proc->buffer,
+					 proc->buffer + BINDER_MIN_ALLOC, vma);
 	preempt_enable_no_resched();
 	if (ret) {
 		ret = -ENOMEM;