[PATCH v2 5/6] mm: tlb: Provide flush_*_tlb_range wrappers

From: Zhenyu Ye
Date: Thu Apr 23 2020 - 09:59:43 EST


This patch provides flush_{pte|pmd|pud|p4d}_tlb_range() in generic
code, which are implemented through the mmu_gather APIs. These
interfaces set tlb->cleared_* and finally call tlb_flush(), so we
can do the TLB invalidation according to the information in
struct mmu_gather.

Signed-off-by: Zhenyu Ye <yezhenyu2@xxxxxxxxxx>
---
include/asm-generic/pgtable.h | 12 ++++++++++--
mm/pgtable-generic.c | 22 ++++++++++++++++++++++
2 files changed, 32 insertions(+), 2 deletions(-)

diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 329b8c8ca703..8c92122ded9b 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1161,11 +1161,19 @@ static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
* invalidate the entire TLB which is not desitable.
* e.g. see arch/arc: flush_pmd_tlb_range
*/
-#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
-#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+extern void flush_pte_tlb_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end);
+extern void flush_pmd_tlb_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end);
+extern void flush_pud_tlb_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end);
+extern void flush_p4d_tlb_range(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long end);
#else
+#define flush_pte_tlb_range(vma, addr, end) BUILD_BUG()
#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG()
#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG()
+#define flush_p4d_tlb_range(vma, addr, end) BUILD_BUG()
#endif
#endif

diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 3d7c01e76efc..3eff199d3507 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -101,6 +101,28 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

+#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+
+#define FLUSH_Pxx_TLB_RANGE(_pxx) \
+void flush_##_pxx##_tlb_range(struct vm_area_struct *vma, \
+ unsigned long addr, unsigned long end) \
+{ \
+ struct mmu_gather tlb; \
+ \
+ tlb_gather_mmu(&tlb, vma->vm_mm, addr, end); \
+ tlb_start_vma(&tlb, vma); \
+ tlb_flush_##_pxx##_range(&tlb, addr, end - addr); \
+ tlb_end_vma(&tlb, vma); \
+ tlb_finish_mmu(&tlb, addr, end); \
+}
+
+FLUSH_Pxx_TLB_RANGE(pte)
+FLUSH_Pxx_TLB_RANGE(pmd)
+FLUSH_Pxx_TLB_RANGE(pud)
+FLUSH_Pxx_TLB_RANGE(p4d)
+
+#endif /* __HAVE_ARCH_FLUSH_PMD_TLB_RANGE */
+
#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
--
2.19.1