[PATCH] mm/uffd: Guard pte marker callers with PTE_MARKER_UFFD_WP

From: Peter Xu
Date: Wed Aug 03 2022 - 16:40:10 EST


Logically, no !PTE_MARKER user should be able to trigger
make_pte_marker() in any path. However, as an extra guard, put all the
pte marker code under CONFIG_PTE_MARKER_UFFD_WP so that it is not
compiled in at all when the option is not configured.

Reported-by: syzbot+824e71311e757a9689ff@xxxxxxxxxxxxxxxxxxxxxxxxx
Signed-off-by: Peter Xu <peterx@xxxxxxxxxx>
---
mm/hugetlb.c | 6 ++++++
mm/mprotect.c | 2 ++
2 files changed, 8 insertions(+)
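
Note: all the hunks below apply the same compile-time guard. As a
minimal, self-contained userspace sketch of the pattern (zap_one() and
its arguments are hypothetical stand-ins for illustration, not kernel
APIs; build with -DCONFIG_PTE_MARKER_UFFD_WP to keep the marker path):

	#include <stdio.h>

	static void zap_one(int pte_uffd_wp, int drop_marker)
	{
	#ifdef CONFIG_PTE_MARKER_UFFD_WP
		/*
		 * Compiled in only when the option is set: leave a
		 * marker behind instead of fully clearing the entry,
		 * mirroring the make_pte_marker() callers below.
		 */
		if (pte_uffd_wp && !drop_marker) {
			printf("leave uffd-wp pte marker\n");
			return;
		}
	#else
		(void)pte_uffd_wp;
		(void)drop_marker;
	#endif
		printf("clear pte\n");
	}

	int main(void)
	{
		zap_one(1, 0);	/* marker path exists only if configured */
		return 0;
	}

Built without the define, the marker branch is absent from the object
code entirely, which is the extra guarantee this patch adds on top of
the logical one.
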

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a18c071c294e..e632cdf1e3f4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5049,6 +5049,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
* unmapped and its refcount is dropped, so just clear pte here.
*/
if (unlikely(!pte_present(pte))) {
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
/*
* If the pte was wr-protected by uffd-wp in any of the
* swap forms, meanwhile the caller does not want to
@@ -5060,6 +5061,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
set_huge_pte_at(mm, address, ptep,
make_pte_marker(PTE_MARKER_UFFD_WP));
else
+#endif
huge_pte_clear(mm, address, ptep, sz);
spin_unlock(ptl);
continue;
@@ -5088,11 +5090,13 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
if (huge_pte_dirty(pte))
set_page_dirty(page);
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
/* Leave a uffd-wp pte marker if needed */
if (huge_pte_uffd_wp(pte) &&
!(zap_flags & ZAP_FLAG_DROP_MARKER))
set_huge_pte_at(mm, address, ptep,
make_pte_marker(PTE_MARKER_UFFD_WP));
+#endif
hugetlb_count_sub(pages_per_huge_page(h), mm);
page_remove_rmap(page, vma, true);

@@ -6387,10 +6391,12 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
pages++;
} else {
/* None pte */
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
if (unlikely(uffd_wp))
/* Safe to modify directly (none->non-present). */
set_huge_pte_at(mm, address, ptep,
make_pte_marker(PTE_MARKER_UFFD_WP));
+#endif
}
spin_unlock(ptl);
}
diff --git a/mm/mprotect.c b/mm/mprotect.c
index ba5592655ee3..85ef55a74d6e 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -221,6 +221,7 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
} else {
/* It must be an none page, or what else?.. */
WARN_ON_ONCE(!pte_none(oldpte));
+#ifdef CONFIG_PTE_MARKER_UFFD_WP
if (unlikely(uffd_wp && !vma_is_anonymous(vma))) {
/*
* For file-backed mem, we need to be able to
@@ -232,6 +233,7 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
make_pte_marker(PTE_MARKER_UFFD_WP));
pages++;
}
+#endif
}
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
--
2.32.0

