This reverts commit 44e602b4e52f70f04620bbbf4fe46ecb40170bde.
This caused a performance regression, particularly when pages are being reclaimed. We don't need to acquire the mmap_lock to determine when the binder buffer has been fully initialized. A subsequent patch will bring back the lockless approach for this.
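To see why the lock is unnecessary here, consider a userspace sketch (an analogy only, not the binder code; all names below are hypothetical): when a pointer is written exactly once, after setup completes, readers can test it with an ordered load instead of taking the writer's lock, which is what kept reclaim paths contending on the mmap_lock.

	/*
	 * Userspace analogy only; "mapping" stands in for alloc->vma and
	 * is written exactly once, after setup has fully initialized the
	 * state.  Compile with: cc -c -pthread sketch.c
	 */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>

	struct dev_state {
		pthread_rwlock_t map_lock;	/* stands in for the mmap_lock */
		_Atomic(void *) mapping;	/* non-NULL once fully initialized */
	};

	/* Reverted pattern: the lock is taken just to test the pointer. */
	static bool is_ready_locked(struct dev_state *s)
	{
		bool ready;

		pthread_rwlock_rdlock(&s->map_lock);
		ready = atomic_load(&s->mapping) != NULL;
		pthread_rwlock_unlock(&s->map_lock);
		return ready;
	}

	/* Lockless check: an acquire load suffices for a once-set pointer. */
	static bool is_ready_lockless(struct dev_state *s)
	{
		return atomic_load_explicit(&s->mapping,
					    memory_order_acquire) != NULL;
	}

Under reclaim, many tasks hit the locked variant at once; the lockless variant removes the lock from the hot path entirely.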
[cmllamas: resolved trivial conflicts with renaming of alloc->mm]
Fixes: 44e602b4e52f ("binder_alloc: add missing mmap_lock calls when using the VMA")
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Carlos Llamas <cmllamas@google.com>
---
 drivers/android/binder_alloc.c | 31 ++++++++++---------------------
 1 file changed, 10 insertions(+), 21 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 55a3c3c2409f..92c814ec44fe 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -380,15 +380,12 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	size_t size, data_offsets_size;
 	int ret;
 
-	mmap_read_lock(alloc->mm);
 	if (!binder_alloc_get_vma(alloc)) {
-		mmap_read_unlock(alloc->mm);
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 				   "%d: binder_alloc_buf, no vma\n",
 				   alloc->pid);
 		return ERR_PTR(-ESRCH);
 	}
-	mmap_read_unlock(alloc->mm);
 
 	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
 		ALIGN(offsets_size, sizeof(void *));
@@ -916,25 +913,17 @@ void binder_alloc_print_pages(struct seq_file *m,
 	 * Make sure the binder_alloc is fully initialized, otherwise we might
 	 * read inconsistent state.
 	 */
-
-	mmap_read_lock(alloc->mm);
-	if (binder_alloc_get_vma(alloc) == NULL) {
-		mmap_read_unlock(alloc->mm);
-		goto uninitialized;
-	}
-
-	mmap_read_unlock(alloc->mm);
-	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-		page = &alloc->pages[i];
-		if (!page->page_ptr)
-			free++;
-		else if (list_empty(&page->lru))
-			active++;
-		else
-			lru++;
+	if (binder_alloc_get_vma(alloc) != NULL) {
+		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+			page = &alloc->pages[i];
+			if (!page->page_ptr)
+				free++;
+			else if (list_empty(&page->lru))
+				active++;
+			else
+				lru++;
+		}
 	}
-
-uninitialized:
 	mutex_unlock(&alloc->mutex);
 	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
 	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
This reverts commit a43cfc87caaf46710c8027a8c23b8a55f1078f19.
This patch fixed an issue reported by syzkaller in [1]. However, this turned out to be only a band-aid in binder. The root cause, as bisected by syzkaller, was fixed by commit 5789151e48ac ("mm/mmap: undo ->mmap() when mas_preallocate() fails"). We no longer need the patch for binder.
Reverting this patch allows lockless access to alloc->vma in specific cases where the mmap_lock is not required. This avoids the contention that caused the performance regression.
[1] https://lore.kernel.org/all/0000000000004a0dbe05e1d749e0@google.com
[cmllamas: resolved conflicts with rework of alloc->mm and removal of binder_alloc_set_vma(); also fixed comment section]
Fixes: a43cfc87caaf ("android: binder: stop saving a pointer to the VMA")
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Carlos Llamas <cmllamas@google.com>
---
 drivers/android/binder_alloc.c          | 17 +++++++++--------
 drivers/android/binder_alloc.h          |  4 ++--
 drivers/android/binder_alloc_selftest.c |  2 +-
 3 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 92c814ec44fe..eb082b33115b 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -213,7 +213,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 
 	if (mm) {
 		mmap_read_lock(mm);
-		vma = vma_lookup(mm, alloc->vma_addr);
+		vma = alloc->vma;
 	}
 
 	if (!vma && need_mm) {
@@ -314,9 +314,11 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
 {
 	struct vm_area_struct *vma = NULL;
 
-	if (alloc->vma_addr)
-		vma = vma_lookup(alloc->mm, alloc->vma_addr);
-
+	if (alloc->vma) {
+		/* Look at description in binder_alloc_set_vma */
+		smp_rmb();
+		vma = alloc->vma;
+	}
 	return vma;
 }
 
@@ -775,7 +777,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	buffer->free = 1;
 	binder_insert_free_buffer(alloc, buffer);
 	alloc->free_async_space = alloc->buffer_size / 2;
-	alloc->vma_addr = vma->vm_start;
+	alloc->vma = vma;
 
 	return 0;
 
@@ -805,8 +807,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 
 	buffers = 0;
 	mutex_lock(&alloc->mutex);
-	BUG_ON(alloc->vma_addr &&
-	       vma_lookup(alloc->mm, alloc->vma_addr));
+	BUG_ON(alloc->vma);
 
 	while ((n = rb_first(&alloc->allocated_buffers))) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -958,7 +959,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-	alloc->vma_addr = 0;
+	alloc->vma = 0;
 }
 
 /**
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 0f811ac4bcff..138d1d5af9ce 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -75,7 +75,7 @@ struct binder_lru_page {
 /**
  * struct binder_alloc - per-binder proc state for binder allocator
  * @mutex:              protects binder_alloc fields
- * @vma_addr:           vm_area_struct->vm_start passed to mmap_handler
+ * @vma:                vm_area_struct passed to mmap_handler
  *                      (invariant after mmap)
  * @mm:                 copy of task->mm (invariant after open)
  * @buffer:             base of per-proc address space mapped via mmap
@@ -99,7 +99,7 @@ struct binder_lru_page {
  */
 struct binder_alloc {
 	struct mutex mutex;
-	unsigned long vma_addr;
+	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 	void __user *buffer;
 	struct list_head buffers;
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
index 43a881073a42..c2b323bc3b3a 100644
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -287,7 +287,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
 	if (!binder_selftest_run)
 		return;
 	mutex_lock(&binder_selftest_lock);
-	if (!binder_selftest_run || !alloc->vma_addr)
+	if (!binder_selftest_run || !alloc->vma)
 		goto done;
 	pr_info("STARTED\n");
 	binder_selftest_alloc_offset(alloc, end_offset, 0);
Bring back the original lockless design in binder_alloc to determine whether the buffer setup has been completed by the ->mmap() handler. However, this time use smp_load_acquire() and smp_store_release() to wrap all the ordering in a single macro call.
Also, add comments to make it evident that binder uses alloc->vma to determine when the binder_alloc has been fully initialized. In these scenarios acquiring the mmap_lock is not required.
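The pairing can be sketched with C11 atomics in userspace (an analogy only; the kernel primitives are smp_store_release()/smp_load_acquire(), and all names below are made up): every plain write that precedes the release store is guaranteed visible to a reader whose acquire load observes the published pointer.

	/* Userspace analogy of the release/acquire publication pattern;
	 * hypothetical names throughout.  Build with: cc -pthread demo.c */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct alloc_state {
		int buffer_size;		/* plain fields, set up first */
		int free_async_space;
		_Atomic(struct alloc_state *) published; /* NULL until ready */
	};

	static struct alloc_state state;

	static void *setup_thread(void *arg)
	{
		state.buffer_size = 4096;	/* ordinary writes... */
		state.free_async_space = 2048;
		/* ...published by the release store, cf. binder_alloc_set_vma() */
		atomic_store_explicit(&state.published, &state,
				      memory_order_release);
		return NULL;
	}

	static void *reader_thread(void *arg)
	{
		struct alloc_state *s;

		/* pairs with the release store, cf. binder_alloc_get_vma() */
		while (!(s = atomic_load_explicit(&state.published,
						  memory_order_acquire)))
			;	/* not initialized yet */

		/* both plain fields are guaranteed visible here */
		printf("size=%d async=%d\n", s->buffer_size, s->free_async_space);
		return NULL;
	}

	int main(void)
	{
		pthread_t r, w;

		pthread_create(&r, NULL, reader_thread, NULL);
		pthread_create(&w, NULL, setup_thread, NULL);
		pthread_join(r, NULL);
		pthread_join(w, NULL);
		return 0;
	}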
Fixes: a43cfc87caaf ("android: binder: stop saving a pointer to the VMA")
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Carlos Llamas <cmllamas@google.com>
---
 drivers/android/binder_alloc.c | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index eb082b33115b..e7c9d466f8e8 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -309,17 +309,18 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
 	return vma ? -ENOMEM : -ESRCH;
 }
 
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+					struct vm_area_struct *vma)
+{
+	/* pairs with smp_load_acquire in binder_alloc_get_vma() */
+	smp_store_release(&alloc->vma, vma);
+}
+
 static inline struct vm_area_struct *binder_alloc_get_vma(
 		struct binder_alloc *alloc)
 {
-	struct vm_area_struct *vma = NULL;
-
-	if (alloc->vma) {
-		/* Look at description in binder_alloc_set_vma */
-		smp_rmb();
-		vma = alloc->vma;
-	}
-	return vma;
+	/* pairs with smp_store_release in binder_alloc_set_vma() */
+	return smp_load_acquire(&alloc->vma);
 }
 
 static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
@@ -382,6 +383,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	size_t size, data_offsets_size;
 	int ret;
 
+	/* Check binder_alloc is fully initialized */
 	if (!binder_alloc_get_vma(alloc)) {
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 				   "%d: binder_alloc_buf, no vma\n",
@@ -777,7 +779,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 	buffer->free = 1;
 	binder_insert_free_buffer(alloc, buffer);
 	alloc->free_async_space = alloc->buffer_size / 2;
-	alloc->vma = vma;
+
+	/* Signal binder_alloc is fully initialized */
+	binder_alloc_set_vma(alloc, vma);
 
 	return 0;
 
@@ -959,7 +963,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-	alloc->vma = 0;
+	binder_alloc_set_vma(alloc, NULL);
 }
 
 /**