Recent updates to network filesystems enabled zero-copy operations,
which require pinning user-space pages.
This does not work for pages that were allocated via __get_free_pages
and then mapped to user-space via remap_pfn_range.
remap_pfn_range_internal() turns on the VM_IO | VM_PFNMAP vma bits.
VM_PFNMAP in particular marks the pages as having no struct page
associated with them, which is not the case for pages coming from
__get_free_pages(). This in turn makes any attempt to lock such a page
fail, breaking I/O from that address range.
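
For illustration, a minimal sketch of the driver pattern that runs into
this (the zc_* names are hypothetical and error handling is omitted): a
buffer from __get_free_pages() is handed to user space through
remap_pfn_range():

#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/mm.h>

#define ZC_ORDER 2	/* hypothetical buffer size: four pages */

static unsigned long zc_buf;

static int zc_open(struct inode *inode, struct file *file)
{
	/* buddy allocator pages: these do have struct pages */
	zc_buf = __get_free_pages(GFP_KERNEL | __GFP_ZERO, ZC_ORDER);
	return zc_buf ? 0 : -ENOMEM;
}

static int zc_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = virt_to_phys((void *)zc_buf) >> PAGE_SHIFT;

	/*
	 * remap_pfn_range() flags the vma VM_IO | VM_PFNMAP, so a later
	 * get_user_pages() on this mapping sees vm_normal_page() return
	 * NULL and fails to pin, despite the struct pages being there.
	 */
	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}
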
Address this by special-casing pages in those VMAs and not calling
vm_normal_page() for them.
Signed-off-by: Pantelis Antoniou <p.antoniou@xxxxxxxxxxxxxxxxxxx>
---
mm/gup.c | 22 ++++++++++++++++++----
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/mm/gup.c b/mm/gup.c
index 84461d384ae2..e185c18c0c81 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -833,6 +833,20 @@ static inline bool can_follow_write_pte(pte_t pte, struct page *page,
return !userfaultfd_pte_wp(vma, pte);
}
+static struct page *gup_normal_page(struct vm_area_struct *vma,
+ unsigned long address, pte_t pte)
+{
+ unsigned long pfn;
+
+ if (vma->vm_flags & (VM_MIXEDMAP | VM_PFNMAP)) {
+ pfn = pte_pfn(pte);
+ if (!pfn_valid(pfn) || is_zero_pfn(pfn) || pfn > highest_memmap_pfn)
+ return NULL;
+ return pfn_to_page(pfn);
+ }
+ return vm_normal_page(vma, address, pte);