[PATCH v4 36/49] mm: Add vma iterator to vma_adjust() arguments

From: Liam R. Howlett
Date: Fri Jan 20 2023 - 13:53:57 EST
From: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx>

Change the vma_adjust() function definition to accept the vma iterator
and pass it through to __vma_adjust().

Update fs/exec and mm/mremap to use the new vma_adjust() function
parameters.

Revert the __split_vma() calls from __vma_adjust() back to
vma_adjust(), passing the vma iterator through.
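
For illustration only (this snippet is not part of the patch; start,
end, and the surrounding caller are hypothetical stand-ins), the new
calling convention looks like:

	/* Hypothetical caller; vma, start and end are assumed set up. */
	struct mm_struct *mm = vma->vm_mm;
	VMA_ITERATOR(vmi, mm, start);	/* caller now owns the iterator */

	/* vma_adjust() no longer builds a VMA_ITERATOR internally. */
	if (vma_adjust(&vmi, vma, start, end, vma->vm_pgoff, NULL))
		return -ENOMEM;

Because the caller owns the iterator, it is also responsible for where
the iterator points; the fs/exec hunk below, for instance, calls
vma_prev() to reposition the iterator before reusing it for the second
vma_adjust() call.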

Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
---
 fs/exec.c          | 11 ++++-------
 include/linux/mm.h |  9 ++++-----
 mm/mmap.c          | 10 +++++-----
 mm/mremap.c        |  4 ++--
 4 files changed, 15 insertions(+), 19 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index b98647eeae9f..76ee62e1d3f1 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -699,7 +699,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	/*
 	 * cover the whole range: [new_start, old_end)
 	 */
-	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
+	if (vma_adjust(&vmi, vma, new_start, old_end, vma->vm_pgoff, NULL))
 		return -ENOMEM;
 
 	/*
@@ -731,12 +731,9 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	}
 	tlb_finish_mmu(&tlb);
 
-	/*
-	 * Shrink the vma to just the new range.  Always succeeds.
-	 */
-	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
-
-	return 0;
+	vma_prev(&vmi);
+	/* Shrink the vma to just the new range */
+	return vma_adjust(&vmi, vma, new_start, new_end, vma->vm_pgoff, NULL);
 }
 
 /*
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 479c79204d96..75b6d06d69d5 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2834,12 +2834,11 @@ extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admi
 extern int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
 	struct vm_area_struct *expand);
-static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
-	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
+static inline int vma_adjust(struct vma_iterator *vmi,
+	struct vm_area_struct *vma, unsigned long start, unsigned long end,
+	pgoff_t pgoff, struct vm_area_struct *insert)
 {
-	VMA_ITERATOR(vmi, vma->vm_mm, start);
-
-	return __vma_adjust(&vmi, vma, start, end, pgoff, insert, NULL);
+	return __vma_adjust(vmi, vma, start, end, pgoff, insert, NULL);
 }
 extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi,
 	struct mm_struct *, struct vm_area_struct *prev, unsigned long addr,
diff --git a/mm/mmap.c b/mm/mmap.c
index c7d72475ba6d..b6bedc07ef11 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2213,12 +2213,12 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		new->vm_ops->open(new);
 
 	if (new_below)
-		err = __vma_adjust(vmi, vma, addr, vma->vm_end,
-			vma->vm_pgoff + ((addr - new->vm_start) >> PAGE_SHIFT),
-			new, NULL);
+		err = vma_adjust(vmi, vma, addr, vma->vm_end,
+			vma->vm_pgoff + ((addr - new->vm_start) >> PAGE_SHIFT),
+			new);
 	else
-		err = __vma_adjust(vmi, vma, vma->vm_start, addr, vma->vm_pgoff,
-			new, NULL);
+		err = vma_adjust(vmi, vma, vma->vm_start, addr, vma->vm_pgoff,
+			new);
 
 	/* Success. */
 	if (!err) {
diff --git a/mm/mremap.c b/mm/mremap.c
index 71ba8eddd836..2176f0cc7f9a 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -1047,8 +1047,8 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 				extension_end, vma->vm_flags, vma->anon_vma,
 				vma->vm_file, extension_pgoff, vma_policy(vma),
 				vma->vm_userfaultfd_ctx, anon_vma_name(vma));
-		} else if (vma_adjust(vma, vma->vm_start, addr + new_len,
-			   vma->vm_pgoff, NULL)) {
+		} else if (vma_adjust(&vmi, vma, vma->vm_start,
+			   addr + new_len, vma->vm_pgoff, NULL)) {
 			vma = NULL;
 		}
 		if (!vma) {
--
2.35.1