Re: [PATCH v4 1/9] powerpc/mmu_gather: Enable RCU_TABLE_FREE even for !SMP case

From: Michael Ellerman
Date: Sat Jan 18 2020 - 06:01:15 EST


"Aneesh Kumar K.V" <aneesh.kumar@xxxxxxxxxxxxx> writes:
> A follow-up patch is going to make sure we correctly invalidate the page walk
> cache before we free page table pages. In order to keep things simple, enable
> RCU_TABLE_FREE even for !SMP so that we don't have to fix up the !SMP case
> differently in the follow-up patch.
>
> The !SMP case is currently broken for radix translation w.r.t. the page walk
> cache flush: we can get interrupted in between freeing a page table and
> flushing the page walk cache, which means the page walk cache can still hold
> entries pointing to tables that have already been freed.
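
To make that hazard concrete, here is a rough sketch of the window described
above (illustrative only, not the actual powerpc code paths):

  /* Illustrative sketch only -- a simplified view, not the real code. */
  static inline void nosmp_pgtable_free_tlb(struct mmu_gather *tlb,
  					    void *table, int shift)
  {
  	pgtable_free(table, shift);	/* table page goes back to the allocator */
  	/*
  	 * An interrupt taken here can trigger a hardware table walk that
  	 * still uses page walk cache entries pointing at the page we just
  	 * freed (and which may already have been reused).  Routing !SMP
  	 * through tlb_remove_table() as well lets the follow-up patch put
  	 * the page walk cache invalidation before the free, in one shared
  	 * code path.
  	 */
  }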

For the archives, both our platforms that run on Power9 force SMP on in
Kconfig, so the !SMP case is unlikely to be a problem for anyone in
practice, unless they've hacked their kernel to build it !SMP.
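
(In Kconfig terms that is something to the effect of the Power9 platform
options selecting SMP -- roughly the following, as an illustration only, not
the exact select lists:

  config PPC_POWERNV
  	select SMP	# illustrative; see the platform Kconfig for the real list

so a defconfig or distro build can't end up !SMP on those machines by
accident.)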

> Cc: <stable@xxxxxxxxxxxxxxx>
> Acked-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxx>
> ---
> arch/powerpc/Kconfig | 2 +-
> arch/powerpc/include/asm/book3s/32/pgalloc.h | 8 --------
> arch/powerpc/include/asm/book3s/64/pgalloc.h | 2 --
> arch/powerpc/include/asm/nohash/pgalloc.h | 8 --------
> arch/powerpc/mm/book3s64/pgtable.c | 7 -------
> 5 files changed, 1 insertion(+), 26 deletions(-)

Acked-by: Michael Ellerman <mpe@xxxxxxxxxxxxxx>

cheers

> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 1ec34e16ed65..04240205f38c 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -222,7 +222,7 @@ config PPC
> select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
> select HAVE_PERF_REGS
> select HAVE_PERF_USER_STACK_DUMP
> - select HAVE_RCU_TABLE_FREE if SMP
> + select HAVE_RCU_TABLE_FREE
> select HAVE_RCU_TABLE_NO_INVALIDATE if HAVE_RCU_TABLE_FREE
> select HAVE_MMU_GATHER_PAGE_SIZE
> select HAVE_REGS_AND_STACK_ACCESS_API
> diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
> index 998317702630..dc5c039eb28e 100644
> --- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
> +++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
> @@ -49,7 +49,6 @@ static inline void pgtable_free(void *table, unsigned index_size)
>
> #define get_hugepd_cache_index(x) (x)
>
> -#ifdef CONFIG_SMP
> static inline void pgtable_free_tlb(struct mmu_gather *tlb,
> void *table, int shift)
> {
> @@ -66,13 +65,6 @@ static inline void __tlb_remove_table(void *_table)
>
> pgtable_free(table, shift);
> }
> -#else
> -static inline void pgtable_free_tlb(struct mmu_gather *tlb,
> - void *table, int shift)
> -{
> - pgtable_free(table, shift);
> -}
> -#endif
>
> static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
> unsigned long address)
> diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
> index f6968c811026..a41e91bd0580 100644
> --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
> +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
> @@ -19,9 +19,7 @@ extern struct vmemmap_backing *vmemmap_list;
> extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
> extern void pmd_fragment_free(unsigned long *);
> extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
> -#ifdef CONFIG_SMP
> extern void __tlb_remove_table(void *_table);
> -#endif
> void pte_frag_destroy(void *pte_frag);
>
> static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
> diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h
> index 332b13b4ecdb..29c43665a753 100644
> --- a/arch/powerpc/include/asm/nohash/pgalloc.h
> +++ b/arch/powerpc/include/asm/nohash/pgalloc.h
> @@ -46,7 +46,6 @@ static inline void pgtable_free(void *table, int shift)
>
> #define get_hugepd_cache_index(x) (x)
>
> -#ifdef CONFIG_SMP
> static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
> {
> unsigned long pgf = (unsigned long)table;
> @@ -64,13 +63,6 @@ static inline void __tlb_remove_table(void *_table)
> pgtable_free(table, shift);
> }
>
> -#else
> -static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
> -{
> - pgtable_free(table, shift);
> -}
> -#endif
> -
> static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
> unsigned long address)
> {
> diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
> index 75483b40fcb1..2bf7e1b4fd82 100644
> --- a/arch/powerpc/mm/book3s64/pgtable.c
> +++ b/arch/powerpc/mm/book3s64/pgtable.c
> @@ -378,7 +378,6 @@ static inline void pgtable_free(void *table, int index)
> }
> }
>
> -#ifdef CONFIG_SMP
> void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
> {
> unsigned long pgf = (unsigned long)table;
> @@ -395,12 +394,6 @@ void __tlb_remove_table(void *_table)
>
> return pgtable_free(table, index);
> }
> -#else
> -void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
> -{
> - return pgtable_free(table, index);
> -}
> -#endif
>
> #ifdef CONFIG_PROC_FS
> atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
> --
> 2.24.1
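
For anyone reading this in the archive who isn't familiar with
HAVE_RCU_TABLE_FREE: with the #ifdef CONFIG_SMP blocks gone, pgtable_free_tlb()
always hands the encoded table pointer to the generic tlb_remove_table(), which
defers the real free. Roughly, as a simplified sketch of the generic
mm/mmu_gather.c behaviour of that era (not a verbatim copy -- the real code
also handles batch allocation failure and TLB invalidation):

  /* Simplified sketch of the generic RCU-deferred table free. */
  static void tlb_remove_table_rcu(struct rcu_head *head)
  {
  	struct mmu_table_batch *batch;
  	int i;

  	batch = container_of(head, struct mmu_table_batch, rcu);
  	for (i = 0; i < batch->nr; i++)
  		__tlb_remove_table(batch->tables[i]);	/* arch callback frees it */

  	free_page((unsigned long)batch);
  }

  static void tlb_table_flush(struct mmu_gather *tlb)
  {
  	struct mmu_table_batch **batch = &tlb->batch;

  	if (*batch) {
  		/*
  		 * The actual free only happens after an RCU grace period,
  		 * so any walker that could still see the old table has
  		 * finished by the time the page is reused.
  		 */
  		call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
  		*batch = NULL;
  	}
  }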