Re: [PATCH 3/7] binder: rename alloc->vma_vm_mm to alloc->mm
From: Todd Kjos
Date: Tue Aug 30 2022 - 18:21:03 EST
On Mon, Aug 29, 2022 at 1:13 PM 'Carlos Llamas' via kernel-team
<kernel-team@xxxxxxxxxxx> wrote:
>
> Rename ->vma_vm_mm to ->mm to reflect the fact that we no longer cache
> this reference from vma->vm_mm but from current->mm instead.
>
> No functional changes in this patch.
>
> Signed-off-by: Carlos Llamas <cmllamas@xxxxxxxxxx>
Acked-by: Todd Kjos <tkjos@xxxxxxxxxx>
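
For anyone skimming the series, here is a condensed view of the alloc->mm
lifecycle after this patch, pulled together from the hunks below. It is
kernel context only (not a standalone program), teardown and locking are
elided, and all names come straight from the patch:

	/* At open(): pin the caller's mm_struct for the life of the proc. */
	void binder_alloc_init(struct binder_alloc *alloc)
	{
		alloc->mm = current->mm;	/* previously cached as vma->vm_mm at mmap() */
		mmgrab(alloc->mm);		/* pins the mm_struct itself, not the address space */
	}

	/* At release: drop the pin taken in binder_alloc_init(). */
	void binder_alloc_deferred_release(struct binder_alloc *alloc)
	{
		/* ... buffer and page teardown elided ... */
		if (alloc->mm)
			mmdrop(alloc->mm);	/* balances the mmgrab() above */
	}

Note that mmgrab()/mmdrop() only keep the mm_struct allocated; paths that
actually touch the address space still take a temporary full reference,
as binder_update_page_range() does below:

	if (need_mm && mmget_not_zero(alloc->mm))
		mm = alloc->mm;
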
> ---
> drivers/android/binder_alloc.c | 34 +++++++++++++++++-----------------
> drivers/android/binder_alloc.h | 4 ++--
> 2 files changed, 19 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
> index 9b1778c00610..749a4cd30a83 100644
> --- a/drivers/android/binder_alloc.c
> +++ b/drivers/android/binder_alloc.c
> @@ -208,8 +208,8 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
> }
> }
>
> - if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
> - mm = alloc->vma_vm_mm;
> + if (need_mm && mmget_not_zero(alloc->mm))
> + mm = alloc->mm;
>
> if (mm) {
> mmap_read_lock(mm);
> @@ -322,9 +322,9 @@ static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
> */
> if (vma) {
> vm_start = vma->vm_start;
> - mmap_assert_write_locked(alloc->vma_vm_mm);
> + mmap_assert_write_locked(alloc->mm);
> } else {
> - mmap_assert_locked(alloc->vma_vm_mm);
> + mmap_assert_locked(alloc->mm);
> }
>
> alloc->vma_addr = vm_start;
> @@ -336,7 +336,7 @@ static inline struct vm_area_struct *binder_alloc_get_vma(
> struct vm_area_struct *vma = NULL;
>
> if (alloc->vma_addr)
> - vma = vma_lookup(alloc->vma_vm_mm, alloc->vma_addr);
> + vma = vma_lookup(alloc->mm, alloc->vma_addr);
>
> return vma;
> }
> @@ -401,15 +401,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
> size_t size, data_offsets_size;
> int ret;
>
> - mmap_read_lock(alloc->vma_vm_mm);
> + mmap_read_lock(alloc->mm);
> if (!binder_alloc_get_vma(alloc)) {
> - mmap_read_unlock(alloc->vma_vm_mm);
> + mmap_read_unlock(alloc->mm);
> binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
> "%d: binder_alloc_buf, no vma\n",
> alloc->pid);
> return ERR_PTR(-ESRCH);
> }
> - mmap_read_unlock(alloc->vma_vm_mm);
> + mmap_read_unlock(alloc->mm);
>
> data_offsets_size = ALIGN(data_size, sizeof(void *)) +
> ALIGN(offsets_size, sizeof(void *));
> @@ -823,7 +823,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
> buffers = 0;
> mutex_lock(&alloc->mutex);
> BUG_ON(alloc->vma_addr &&
> - vma_lookup(alloc->vma_vm_mm, alloc->vma_addr));
> + vma_lookup(alloc->mm, alloc->vma_addr));
>
> while ((n = rb_first(&alloc->allocated_buffers))) {
> buffer = rb_entry(n, struct binder_buffer, rb_node);
> @@ -873,8 +873,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
> kfree(alloc->pages);
> }
> mutex_unlock(&alloc->mutex);
> - if (alloc->vma_vm_mm)
> - mmdrop(alloc->vma_vm_mm);
> + if (alloc->mm)
> + mmdrop(alloc->mm);
>
> binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
> "%s: %d buffers %d, pages %d\n",
> @@ -931,13 +931,13 @@ void binder_alloc_print_pages(struct seq_file *m,
> * read inconsistent state.
> */
>
> - mmap_read_lock(alloc->vma_vm_mm);
> + mmap_read_lock(alloc->mm);
> if (binder_alloc_get_vma(alloc) == NULL) {
> - mmap_read_unlock(alloc->vma_vm_mm);
> + mmap_read_unlock(alloc->mm);
> goto uninitialized;
> }
>
> - mmap_read_unlock(alloc->vma_vm_mm);
> + mmap_read_unlock(alloc->mm);
> for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
> page = &alloc->pages[i];
> if (!page->page_ptr)
> @@ -1020,7 +1020,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
> index = page - alloc->pages;
> page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
>
> - mm = alloc->vma_vm_mm;
> + mm = alloc->mm;
> if (!mmget_not_zero(mm))
> goto err_mmget;
> if (!mmap_read_trylock(mm))
> @@ -1089,8 +1089,8 @@ static struct shrinker binder_shrinker = {
> void binder_alloc_init(struct binder_alloc *alloc)
> {
> alloc->pid = current->group_leader->pid;
> - alloc->vma_vm_mm = current->mm;
> - mmgrab(alloc->vma_vm_mm);
> + alloc->mm = current->mm;
> + mmgrab(alloc->mm);
> mutex_init(&alloc->mutex);
> INIT_LIST_HEAD(&alloc->buffers);
> }
> diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
> index 0c37935ff7a2..fe80cc405707 100644
> --- a/drivers/android/binder_alloc.h
> +++ b/drivers/android/binder_alloc.h
> @@ -78,7 +78,7 @@ struct binder_lru_page {
> * (invariant after mmap)
> * @tsk: tid for task that called init for this proc
> * (invariant after init)
> - * @vma_vm_mm: copy of vma->vm_mm (invariant after mmap)
> + * @mm: copy of task->mm (invariant after open)
> * @buffer: base of per-proc address space mapped via mmap
> * @buffers: list of all buffers for this proc
> * @free_buffers: rb tree of buffers available for allocation
> @@ -101,7 +101,7 @@ struct binder_lru_page {
> struct binder_alloc {
> struct mutex mutex;
> unsigned long vma_addr;
> - struct mm_struct *vma_vm_mm;
> + struct mm_struct *mm;
> void __user *buffer;
> struct list_head buffers;
> struct rb_root free_buffers;
> --
> 2.37.2.672.g94769d06f0-goog
>