[PATCH 07/20] riscv: move the TLB flush logic out of line

From: Christoph Hellwig
Date: Tue Sep 03 2019 - 05:33:02 EST


The TLB flush logic is going to become more complex. Start moving
it out of line.

Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Reviewed-by: Atish Patra <atish.patra@xxxxxxx>
---
arch/riscv/include/asm/tlbflush.h | 37 ++++++-------------------------
arch/riscv/mm/Makefile            |  3 +++
arch/riscv/mm/tlbflush.c          | 35 +++++++++++++++++++++++++++++
3 files changed, 45 insertions(+), 30 deletions(-)
create mode 100644 arch/riscv/mm/tlbflush.c

diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index df31fe2ed09c..075a784c66c5 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -25,8 +25,13 @@ static inline void local_flush_tlb_page(unsigned long addr)
	__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
}

-#ifndef CONFIG_SMP
-
+#ifdef CONFIG_SMP
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end);
+#else /* CONFIG_SMP */
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)

@@ -37,34 +42,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
}

#define flush_tlb_mm(mm) flush_tlb_all()
-
-#else /* CONFIG_SMP */
-
-#include <asm/sbi.h>
-
-static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
-				     unsigned long size)
-{
-	struct cpumask hmask;
-
-	riscv_cpuid_to_hartid_mask(cmask, &hmask);
-	sbi_remote_sfence_vma(hmask.bits, start, size);
-}
-
-#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
-
-#define flush_tlb_range(vma, start, end) \
-	remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long addr)
-{
-	flush_tlb_range(vma, addr, addr + PAGE_SIZE);
-}
-
-#define flush_tlb_mm(mm) \
-	remote_sfence_vma(mm_cpumask(mm), 0, -1)
-
#endif /* CONFIG_SMP */

/* Flush a range of kernel pages */
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 74055e1d6f21..9d9a17335686 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -13,4 +13,7 @@ obj-y += cacheflush.o
obj-y += context.o
obj-y += sifive_l2_cache.o

+ifeq ($(CONFIG_MMU),y)
+obj-$(CONFIG_SMP) += tlbflush.o
+endif
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
new file mode 100644
index 000000000000..df93b26f1b9d
--- /dev/null
+++ b/arch/riscv/mm/tlbflush.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <asm/sbi.h>
+
+void flush_tlb_all(void)
+{
+	sbi_remote_sfence_vma(NULL, 0, -1);
+}
+
+static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
+				  unsigned long size)
+{
+	struct cpumask hmask;
+
+	riscv_cpuid_to_hartid_mask(cmask, &hmask);
+	sbi_remote_sfence_vma(hmask.bits, start, size);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+		     unsigned long end)
+{
+	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
+}
--
2.20.1