Re: [PATCH -next v7 6/6] riscv/mm: Enable ARCH_SUPPORTS_PAGE_TABLE_CHECK

From: Tong Tiangen
Date: Tue May 24 2022 - 02:40:17 EST




On 2022/5/24 12:17, Palmer Dabbelt wrote:
On Sat, 07 May 2022 04:01:14 PDT (-0700), tongtiangen@xxxxxxxxxx wrote:
As commit d283d422c6c4 ("x86: mm: add x86_64 support for page table check")
did for x86_64, enable ARCH_SUPPORTS_PAGE_TABLE_CHECK on riscv.

Add additional page table check stubs for page table helpers; these stubs
can be used to check the existing page table entries.
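
For illustration (this is not the verbatim mm/page_table_check.c code, and
pte_needs_tracking() is just a made-up name for this sketch), this is roughly
the decision the generic checker makes with such a helper before it records
an entry:

    /*
     * Illustrative only: the generic page table check audits an entry
     * before it is written, and only tracks entries that map
     * user-accessible memory -- which is what the new
     * pte/pmd/pud_user_accessible_page() helpers report for riscv.
     */
    static bool pte_needs_tracking(struct mm_struct *mm, pte_t pte)
    {
        /* kernel page tables (init_mm) are never tracked */
        if (mm == &init_mm)
            return false;

        /* only present, user-accessible mappings are counted per pfn */
        return pte_user_accessible_page(pte);
    }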

Signed-off-by: Tong Tiangen <tongtiangen@xxxxxxxxxx>
Reviewed-by: Pasha Tatashin <pasha.tatashin@xxxxxxxxxx>

This doesn't build on rv32 (or presumably any other 32-bit arch); something like this should do it:

   diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
   index 2b18e4410c4d..ea658ed6a1b1 100644
   --- a/arch/riscv/include/asm/pgtable.h
   +++ b/arch/riscv/include/asm/pgtable.h
   @@ -690,11 +690,13 @@ static inline bool pmd_user_accessible_page(pmd_t pmd)
        return pmd_leaf(pmd) && pmd_user(pmd);
    }
   +#ifdef CONFIG_64BIT
    static inline bool pud_user_accessible_page(pud_t pud)
    {
        return pud_leaf(pud) && pud_user(pud);
    }
   -#endif
   +#endif /* CONFIG_64BIT */
   +#endif /* CONFIG_PAGE_TABLE_CHECK */
    #ifdef CONFIG_TRANSPARENT_HUGEPAGE
    static inline int pmd_trans_huge(pmd_t pmd)
   diff --git a/mm/page_table_check.c b/mm/page_table_check.c
   index 3692bea2ea2c..ac9bddfa1398 100644
   --- a/mm/page_table_check.c
   +++ b/mm/page_table_check.c
   @@ -165,6 +165,7 @@ void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
    }
    EXPORT_SYMBOL(__page_table_check_pmd_clear);
   +#if CONFIG_PGTABLE_LEVELS > 3
    void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
                      pud_t pud)
    {
   @@ -177,6 +178,7 @@ void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
        }
    }
    EXPORT_SYMBOL(__page_table_check_pud_clear);
   +#endif
    void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, pte_t pte)
   @@ -208,6 +210,7 @@ void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
    }
    EXPORT_SYMBOL(__page_table_check_pmd_set);
   +#if CONFIG_PGTABLE_LEVELS > 3
    void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
                    pud_t *pudp, pud_t pud)
    {
   @@ -222,6 +225,7 @@ void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
        }
    }
    EXPORT_SYMBOL(__page_table_check_pud_set);
   +#endif
    void __page_table_check_pte_clear_range(struct mm_struct *mm,
                        unsigned long addr,

with those changes this passes my tests, so

Acked-by: Palmer Dabbelt <palmer@xxxxxxxxxxxx>

Feel free to take this along with the rest of them if that's easier; the RISC-V bits are pretty light-weight.  I'm also happy to do some sort of shared tag once the other issues get sorted out.

Thanks!

Hi Palmer, thanks for your reply.

I fixed the riscv32 issue a few days ago; it looks like it also resolves the build failure you hit:

https://lore.kernel.org/lkml/20220517074548.2227779-2-tongtiangen@xxxxxxxxxx/
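
By the way, in case it helps with testing on riscv: as far as I understand,
ARCH_SUPPORTS_PAGE_TABLE_CHECK only makes the option selectable; the checker
itself still has to be enabled, roughly like this (see
Documentation/vm/page_table_check.rst):

    # .config
    CONFIG_PAGE_TABLE_CHECK=y
    # either enforce it unconditionally ...
    CONFIG_PAGE_TABLE_CHECK_ENFORCED=y
    # ... or leave ENFORCED unset and pass this on the kernel command line:
    page_table_check=on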

Thanks,
Tong.

---
 arch/riscv/Kconfig               |  1 +
 arch/riscv/include/asm/pgtable.h | 71 +++++++++++++++++++++++++++++---
 2 files changed, 66 insertions(+), 6 deletions(-)

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 715390feb6ea..bb9fde09eea5 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -38,6 +38,7 @@ config RISCV
     select ARCH_SUPPORTS_ATOMIC_RMW
     select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
     select ARCH_SUPPORTS_HUGETLBFS if MMU
+    select ARCH_SUPPORTS_PAGE_TABLE_CHECK
     select ARCH_USE_MEMTEST
     select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
     select ARCH_WANT_FRAME_POINTERS
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 046b44225623..62e733c85836 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -114,6 +114,8 @@
 #include <asm/pgtable-32.h>
 #endif /* CONFIG_64BIT */

+#include <linux/page_table_check.h>
+
 #ifdef CONFIG_XIP_KERNEL
 #define XIP_FIXUP(addr) ({                            \
     uintptr_t __a = (uintptr_t)(addr);                    \
@@ -315,6 +317,11 @@ static inline int pte_exec(pte_t pte)
     return pte_val(pte) & _PAGE_EXEC;
 }

+static inline int pte_user(pte_t pte)
+{
+    return pte_val(pte) & _PAGE_USER;
+}
+
 static inline int pte_huge(pte_t pte)
 {
     return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
@@ -446,7 +453,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)

 void flush_icache_pte(pte_t pte);

-static inline void set_pte_at(struct mm_struct *mm,
+static inline void __set_pte_at(struct mm_struct *mm,
     unsigned long addr, pte_t *ptep, pte_t pteval)
 {
     if (pte_present(pteval) && pte_exec(pteval))
@@ -455,10 +462,17 @@ static inline void set_pte_at(struct mm_struct *mm,
     set_pte(ptep, pteval);
 }

+static inline void set_pte_at(struct mm_struct *mm,
+    unsigned long addr, pte_t *ptep, pte_t pteval)
+{
+    page_table_check_pte_set(mm, addr, ptep, pteval);
+    __set_pte_at(mm, addr, ptep, pteval);
+}
+
 static inline void pte_clear(struct mm_struct *mm,
     unsigned long addr, pte_t *ptep)
 {
-    set_pte_at(mm, addr, ptep, __pte(0));
+    __set_pte_at(mm, addr, ptep, __pte(0));
 }

 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
@@ -479,7 +493,11 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                        unsigned long address, pte_t *ptep)
 {
-    return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
+    pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
+
+    page_table_check_pte_clear(mm, address, pte);
+
+    return pte;
 }

 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -546,6 +564,13 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
     return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
 }

+#define __pud_to_phys(pud)  (pud_val(pud) >> _PAGE_PFN_SHIFT << PAGE_SHIFT)
+
+static inline unsigned long pud_pfn(pud_t pud)
+{
+    return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
+}
+
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
     return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
@@ -567,6 +592,11 @@ static inline int pmd_young(pmd_t pmd)
     return pte_young(pmd_pte(pmd));
 }

+static inline int pmd_user(pmd_t pmd)
+{
+    return pte_user(pmd_pte(pmd));
+}
+
 static inline pmd_t pmd_mkold(pmd_t pmd)
 {
     return pte_pmd(pte_mkold(pmd_pte(pmd)));
@@ -600,15 +630,39 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                 pmd_t *pmdp, pmd_t pmd)
 {
-    return set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
+    page_table_check_pmd_set(mm, addr, pmdp, pmd);
+    return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
+}
+
+static inline int pud_user(pud_t pud)
+{
+    return pte_user(pud_pte(pud));
 }

 static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
                 pud_t *pudp, pud_t pud)
 {
-    return set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
+    page_table_check_pud_set(mm, addr, pudp, pud);
+    return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
+}
+
+#ifdef CONFIG_PAGE_TABLE_CHECK
+static inline bool pte_user_accessible_page(pte_t pte)
+{
+    return pte_present(pte) && pte_user(pte);
+}
+
+static inline bool pmd_user_accessible_page(pmd_t pmd)
+{
+    return pmd_leaf(pmd) && pmd_user(pmd);
 }

+static inline bool pud_user_accessible_page(pud_t pud)
+{
+    return pud_leaf(pud) && pud_user(pud);
+}
+#endif
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline int pmd_trans_huge(pmd_t pmd)
 {
@@ -634,7 +688,11 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                     unsigned long address, pmd_t *pmdp)
 {
-    return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
+    pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));
+
+    page_table_check_pmd_clear(mm, address, pmd);
+
+    return pmd;
 }

 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -648,6 +706,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
                 unsigned long address, pmd_t *pmdp, pmd_t pmd)
 {
+    page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
     return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */