[PATCH 4.4 79/88] mm: replace get_user_pages() write/force parameters with gup_flags

From: Greg Kroah-Hartman
Date: Fri Dec 14 2018 - 07:16:58 EST


4.4-stable review patch. If anyone has any objections, please let me know.

------------------

From: Lorenzo Stoakes <lstoakes@xxxxxxxxx>

commit 768ae309a96103ed02eb1e111e838c87854d8b51 upstream.

This removes the 'write' and 'force' parameters from get_user_pages()
and replaces them with 'gup_flags' to make the use of FOLL_FORCE
explicit in callers, as use of this flag can result in surprising
behaviour (and hence bugs) within the mm subsystem.
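
The conversion applied to each caller below is mechanical. A minimal
sketch of the before/after pattern (the surrounding variable names are
illustrative, not taken from any particular hunk):

        /* Before: separate boolean write/force arguments */
        ret = get_user_pages(tsk, mm, start, nr_pages, write, force,
                             pages, vmas);

        /* After: the caller builds gup_flags explicitly */
        unsigned int gup_flags = 0;

        if (write)
                gup_flags |= FOLL_WRITE;
        if (force)
                gup_flags |= FOLL_FORCE;

        ret = get_user_pages(tsk, mm, start, nr_pages, gup_flags,
                             pages, vmas);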

Signed-off-by: Lorenzo Stoakes <lstoakes@xxxxxxxxx>
Acked-by: Christian König <christian.koenig@xxxxxxx>
Acked-by: Jesper Nilsson <jesper.nilsson@xxxxxxxx>
Acked-by: Michal Hocko <mhocko@xxxxxxxx>
Reviewed-by: Jan Kara <jack@xxxxxxx>
Signed-off-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
[bwh: Backported to 4.4:
 - Drop changes in rapidio, vchiq, goldfish
 - Keep the "write" variable in amdgpu_ttm_tt_pin_userptr() as it's still
   needed
 - Also update calls from various other places that now use
   get_user_pages_remote() upstream, which were updated there by commit
   9beae1ea8930 "mm: replace get_user_pages_remote() write/force ..."
 - Also update calls from hfi1 and ipath
 - Adjust context]
Signed-off-by: Ben Hutchings <ben.hutchings@xxxxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
arch/cris/arch-v32/drivers/cryptocop.c | 4 +---
arch/ia64/kernel/err_inject.c | 2 +-
arch/x86/mm/mpx.c | 3 +--
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6 +++++-
drivers/gpu/drm/i915/i915_gem_userptr.c | 6 +++++-
drivers/gpu/drm/radeon/radeon_ttm.c | 2 +-
drivers/gpu/drm/via/via_dmablit.c | 4 ++--
drivers/infiniband/core/umem.c | 6 +++++-
drivers/infiniband/core/umem_odp.c | 7 +++++--
drivers/infiniband/hw/mthca/mthca_memfree.c | 4 ++--
drivers/infiniband/hw/qib/qib_user_pages.c | 3 ++-
drivers/infiniband/hw/usnic/usnic_uiom.c | 5 ++++-
drivers/media/v4l2-core/videobuf-dma-sg.c | 7 +++++--
drivers/misc/mic/scif/scif_rma.c | 3 +--
drivers/misc/sgi-gru/grufault.c | 2 +-
drivers/staging/rdma/hfi1/user_pages.c | 2 +-
drivers/staging/rdma/ipath/ipath_user_pages.c | 2 +-
drivers/virt/fsl_hypervisor.c | 4 ++--
fs/exec.c | 9 +++++++--
include/linux/mm.h | 2 +-
kernel/events/uprobes.c | 4 ++--
mm/gup.c | 15 +++++----------
mm/memory.c | 6 +++++-
mm/mempolicy.c | 2 +-
mm/nommu.c | 18 ++++--------------
security/tomoyo/domain.c | 3 ++-
26 files changed, 72 insertions(+), 59 deletions(-)

--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2724,7 +2724,6 @@ static int cryptocop_ioctl_process(struc
(unsigned long int)(oper.indata + prev_ix),
noinpages,
0, /* read access only for in data */
- 0, /* no force */
inpages,
NULL);

@@ -2740,8 +2739,7 @@ static int cryptocop_ioctl_process(struc
current->mm,
(unsigned long int)oper.cipher_outdata,
nooutpages,
- 1, /* write access for out data */
- 0, /* no force */
+ FOLL_WRITE, /* write access for out data */
outpages,
NULL);
up_read(&current->mm->mmap_sem);
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -143,7 +143,7 @@ store_virtual_to_phys(struct device *dev
int ret;

ret = get_user_pages(current, current->mm, virt_addr,
- 1, VM_READ, 0, NULL, NULL);
+ 1, FOLL_WRITE, NULL, NULL);
if (ret<=0) {
#ifdef ERR_INJ_DEBUG
printk("Virtual address %lx is not existing.\n",virt_addr);
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -536,10 +536,9 @@ static int mpx_resolve_fault(long __user
{
long gup_ret;
int nr_pages = 1;
- int force = 0;

gup_ret = get_user_pages(current, current->mm, (unsigned long)addr,
- nr_pages, write, force, NULL, NULL);
+ nr_pages, write ? FOLL_WRITE : 0, NULL, NULL);
/*
* get_user_pages() returns number of pages gotten.
* 0 means we failed to fault in and get anything,
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -496,9 +496,13 @@ static int amdgpu_ttm_tt_pin_userptr(str
int r;

int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+ unsigned int flags = 0;
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

+ if (write)
+ flags |= FOLL_WRITE;
+
if (current->mm != gtt->usermm)
return -EPERM;

@@ -519,7 +523,7 @@ static int amdgpu_ttm_tt_pin_userptr(str
struct page **pages = ttm->pages + pinned;

r = get_user_pages(current, current->mm, userptr, num_pages,
- write, 0, pages, NULL);
+ flags, pages, NULL);
if (r < 0)
goto release_pages;

--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -581,13 +581,17 @@ __i915_gem_userptr_get_pages_worker(stru
pvec = drm_malloc_ab(npages, sizeof(struct page *));
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
+ unsigned int flags = 0;
+
+ if (!obj->userptr.read_only)
+ flags |= FOLL_WRITE;

down_read(&mm->mmap_sem);
while (pinned < npages) {
ret = get_user_pages(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
- !obj->userptr.read_only, 0,
+ flags,
pvec + pinned, NULL);
if (ret < 0)
break;
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -557,7 +557,7 @@ static int radeon_ttm_tt_pin_userptr(str
struct page **pages = ttm->pages + pinned;

r = get_user_pages(current, current->mm, userptr, num_pages,
- write, 0, pages, NULL);
+ write ? FOLL_WRITE : 0, pages, NULL);
if (r < 0)
goto release_pages;

--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -242,8 +242,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t
ret = get_user_pages(current, current->mm,
(unsigned long)xfer->mem_addr,
vsg->num_pages,
- (vsg->direction == DMA_FROM_DEVICE),
- 0, vsg->pages, NULL);
+ (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0,
+ vsg->pages, NULL);

up_read(&current->mm->mmap_sem);
if (ret != vsg->num_pages) {
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -95,6 +95,7 @@ struct ib_umem *ib_umem_get(struct ib_uc
DEFINE_DMA_ATTRS(attrs);
struct scatterlist *sg, *sg_list_start;
int need_release = 0;
+ unsigned int gup_flags = FOLL_WRITE;

if (dmasync)
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
@@ -177,6 +178,9 @@ struct ib_umem *ib_umem_get(struct ib_uc
if (ret)
goto out;

+ if (!umem->writable)
+ gup_flags |= FOLL_FORCE;
+
need_release = 1;
sg_list_start = umem->sg_head.sgl;

@@ -184,7 +188,7 @@ struct ib_umem *ib_umem_get(struct ib_uc
ret = get_user_pages(current, current->mm, cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
- 1, !umem->writable, page_list, vma_list);
+ gup_flags, page_list, vma_list);

if (ret < 0)
goto out;
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_
u64 off;
int j, k, ret = 0, start_idx, npages = 0;
u64 base_virt_addr;
+ unsigned int flags = 0;

if (access_mask == 0)
return -EINVAL;
@@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_
goto out_put_task;
}

+ if (access_mask & ODP_WRITE_ALLOWED_BIT)
+ flags |= FOLL_WRITE;
+
start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
k = start_idx;

@@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_
*/
npages = get_user_pages(owning_process, owning_mm, user_virt,
gup_num_pages,
- access_mask & ODP_WRITE_ALLOWED_BIT, 0,
- local_page_list, NULL);
+ flags, local_page_list, NULL);
up_read(&owning_mm->mmap_sem);

if (npages < 0)
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,8 +472,8 @@ int mthca_map_user_db(struct mthca_dev *
goto out;
}

- ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
- pages, NULL);
+ ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1,
+ FOLL_WRITE, pages, NULL);
if (ret < 0)
goto out;

--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -68,7 +68,8 @@ static int __qib_get_user_pages(unsigned
for (got = 0; got < num_pages; got += ret) {
ret = get_user_pages(current, current->mm,
start_page + got * PAGE_SIZE,
- num_pages - got, 1, 1,
+ num_pages - got,
+ FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
if (ret < 0)
goto bail_release;
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -113,6 +113,7 @@ static int usnic_uiom_get_pages(unsigned
int flags;
dma_addr_t pa;
DEFINE_DMA_ATTRS(attrs);
+ unsigned int gup_flags;

if (dmasync)
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
@@ -140,6 +141,8 @@ static int usnic_uiom_get_pages(unsigned

flags = IOMMU_READ | IOMMU_CACHE;
flags |= (writable) ? IOMMU_WRITE : 0;
+ gup_flags = FOLL_WRITE;
+ gup_flags |= (writable) ? 0 : FOLL_FORCE;
cur_base = addr & PAGE_MASK;
ret = 0;

@@ -147,7 +150,7 @@ static int usnic_uiom_get_pages(unsigned
ret = get_user_pages(current, current->mm, cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof(struct page *)),
- 1, !writable, page_list, NULL);
+ gup_flags, page_list, NULL);

if (ret < 0)
goto out;
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -156,6 +156,7 @@ static int videobuf_dma_init_user_locked
{
unsigned long first, last;
int err, rw = 0;
+ unsigned int flags = FOLL_FORCE;

dma->direction = direction;
switch (dma->direction) {
@@ -178,13 +179,15 @@ static int videobuf_dma_init_user_locked
if (NULL == dma->pages)
return -ENOMEM;

+ if (rw == READ)
+ flags |= FOLL_WRITE;
+
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
data, size, dma->nr_pages);

err = get_user_pages(current, current->mm,
data & PAGE_MASK, dma->nr_pages,
- rw == READ, 1, /* force */
- dma->pages, NULL);
+ flags, dma->pages, NULL);

if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -1398,8 +1398,7 @@ retry:
mm,
(u64)addr,
nr_pages,
- !!(prot & SCIF_PROT_WRITE),
- 0,
+ (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
pinned_pages->pages,
NULL);
up_write(&mm->mmap_sem);
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -199,7 +199,7 @@ static int non_atomic_pte_lookup(struct
*pageshift = PAGE_SHIFT;
#endif
if (get_user_pages
- (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
+ (current, current->mm, vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
return -EFAULT;
*paddr = page_to_phys(page);
put_page(page);
--- a/drivers/staging/rdma/hfi1/user_pages.c
+++ b/drivers/staging/rdma/hfi1/user_pages.c
@@ -85,7 +85,7 @@ static int __hfi1_get_user_pages(unsigne
for (got = 0; got < num_pages; got += ret) {
ret = get_user_pages(current, current->mm,
start_page + got * PAGE_SIZE,
- num_pages - got, 1, 1,
+ num_pages - got, FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
if (ret < 0)
goto bail_release;
--- a/drivers/staging/rdma/ipath/ipath_user_pages.c
+++ b/drivers/staging/rdma/ipath/ipath_user_pages.c
@@ -72,7 +72,7 @@ static int __ipath_get_user_pages(unsign
for (got = 0; got < num_pages; got += ret) {
ret = get_user_pages(current, current->mm,
start_page + got * PAGE_SIZE,
- num_pages - got, 1, 1,
+ num_pages - got, FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
if (ret < 0)
goto bail_release;
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -246,8 +246,8 @@ static long ioctl_memcpy(struct fsl_hv_i
down_read(&current->mm->mmap_sem);
num_pinned = get_user_pages(current, current->mm,
param.local_vaddr - lb_offset, num_pages,
- (param.source == -1) ? READ : WRITE,
- 0, pages, NULL);
+ (param.source == -1) ? 0 : FOLL_WRITE,
+ pages, NULL);
up_read(&current->mm->mmap_sem);

if (num_pinned != num_pages) {
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -191,6 +191,7 @@ static struct page *get_arg_page(struct
{
struct page *page;
int ret;
+ unsigned int gup_flags = FOLL_FORCE;

#ifdef CONFIG_STACK_GROWSUP
if (write) {
@@ -199,8 +200,12 @@ static struct page *get_arg_page(struct
return NULL;
}
#endif
- ret = get_user_pages(current, bprm->mm, pos,
- 1, write, 1, &page, NULL);
+
+ if (write)
+ gup_flags |= FOLL_WRITE;
+
+ ret = get_user_pages(current, bprm->mm, pos, 1, gup_flags,
+ &page, NULL);
if (ret <= 0)
return NULL;

--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1199,7 +1199,7 @@ long __get_user_pages(struct task_struct
struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -299,7 +299,7 @@ int uprobe_write_opcode(struct mm_struct

retry:
/* Read the page with vaddr into memory */
- ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
+ ret = get_user_pages(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page, &vma);
if (ret <= 0)
return ret;

@@ -1700,7 +1700,7 @@ static int is_trap_at_addr(struct mm_str
if (likely(result == 0))
goto out;

- result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
+ result = get_user_pages(NULL, mm, vaddr, 1, FOLL_FORCE, &page, NULL);
if (result < 0)
return result;

--- a/mm/gup.c
+++ b/mm/gup.c
@@ -854,18 +854,13 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
* FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
*/
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages, int write,
- int force, struct page **pages, struct vm_area_struct **vmas)
+ unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas)
{
- unsigned int flags = FOLL_TOUCH;
-
- if (write)
- flags |= FOLL_WRITE;
- if (force)
- flags |= FOLL_FORCE;
-
return __get_user_pages_locked(tsk, mm, start, nr_pages,
- pages, vmas, NULL, false, flags);
+ pages, vmas, NULL, false,
+ gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);

--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3715,6 +3715,10 @@ static int __access_remote_vm(struct tas
{
struct vm_area_struct *vma;
void *old_buf = buf;
+ unsigned int flags = FOLL_FORCE;
+
+ if (write)
+ flags |= FOLL_WRITE;

down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
@@ -3724,7 +3728,7 @@ static int __access_remote_vm(struct tas
struct page *page = NULL;

ret = get_user_pages(tsk, mm, addr, 1,
- write, 1, &page, &vma);
+ flags, &page, &vma);
if (ret <= 0) {
#ifndef CONFIG_HAVE_IOREMAP_PROT
break;
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -818,7 +818,7 @@ static int lookup_node(struct mm_struct
struct page *p;
int err;

- err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
+ err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, &p, NULL);
if (err >= 0) {
err = page_to_nid(p);
put_page(p);
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -184,18 +184,11 @@ finish_or_fault:
*/
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas)
{
- int flags = 0;
-
- if (write)
- flags |= FOLL_WRITE;
- if (force)
- flags |= FOLL_FORCE;
-
- return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
- NULL);
+ return __get_user_pages(tsk, mm, start, nr_pages,
+ gup_flags, pages, vmas, NULL);
}
EXPORT_SYMBOL(get_user_pages);

@@ -204,10 +197,7 @@ long get_user_pages_locked(struct task_s
unsigned int gup_flags, struct page **pages,
int *locked)
{
- int write = gup_flags & FOLL_WRITE;
- int force = gup_flags & FOLL_FORCE;
-
- return get_user_pages(tsk, mm, start, nr_pages, write, force,
+ return get_user_pages(tsk, mm, start, nr_pages, gup_flags,
pages, NULL);
}
EXPORT_SYMBOL(get_user_pages_locked);
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -874,7 +874,8 @@ bool tomoyo_dump_page(struct linux_binpr
}
/* Same with get_arg_page(bprm, pos, 0) in fs/exec.c */
#ifdef CONFIG_MMU
- if (get_user_pages(current, bprm->mm, pos, 1, 0, 1, &page, NULL) <= 0)
+ if (get_user_pages(current, bprm->mm, pos, 1,
+ FOLL_FORCE, &page, NULL) <= 0)
return false;
#else
page = bprm->page[pos / PAGE_SIZE];