Re: [PATCH v2] powerpc/mm: mark more tlb functions as __always_inline

From: Masahiro Yamada
Date: Sun Jun 23 2019 - 10:13:55 EST


On Tue, May 21, 2019 at 10:19 PM Masahiro Yamada
<yamada.masahiro@xxxxxxxxxxxxx> wrote:
>
> With CONFIG_OPTIMIZE_INLINING enabled, Laura Abbott reported an error
> with gcc 9.1.1:
>
> arch/powerpc/mm/book3s64/radix_tlb.c: In function '_tlbiel_pid':
> arch/powerpc/mm/book3s64/radix_tlb.c:104:2: warning: asm operand 3 probably doesn't match constraints
> 104 | asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
> | ^~~
> arch/powerpc/mm/book3s64/radix_tlb.c:104:2: error: impossible constraint in 'asm'
>
> Fixing _tlbiel_pid() is enough to address the warning above, but I
> converted more functions to __always_inline to fix all potential issues.
>
> To meet the "i" (immediate) constraint for the asm operands, functions
> propagating "ric" must always be inlined.
>
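To spell out the constraint issue, here is a minimal standalone sketch
(not the kernel code; the names are made up for illustration, and it
assumes an optimizing build, -O1 or higher, as the kernel always uses):

  /* Standalone stand-in for the kernel's __always_inline attribute. */
  #define __always_inline inline __attribute__((always_inline))

  /*
   * "i" demands a compile-time constant.  That only holds if every
   * caller passing a literal is guaranteed to be inlined down to the
   * asm, so constant propagation can reach the operand.  With a plain
   * "static inline" and CONFIG_OPTIMIZE_INLINING, gcc is free to emit
   * an out-of-line copy where "ric" is a runtime variable, and the
   * build fails with "impossible constraint in 'asm'".
   */
  static __always_inline void flush(unsigned long ric)
  {
          /* The asm body is only an assembler comment, so this sketch
           * compiles on any architecture; what matters is the "i"
           * constraint on the operand. */
          asm volatile("# flush, ric=%0" : : "i" (ric) : "memory");
  }

  void flush_tlb(void)
  {
          flush(0);       /* literal argument, e.g. RIC_FLUSH_TLB */
  }

Marking the whole call chain __always_inline, as this patch does,
guarantees that the literal RIC value reaches the asm operand
regardless of the compiler's inlining heuristics.
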
> Fixes: 9012d011660e ("compiler: allow all arches to enable CONFIG_OPTIMIZE_INLINING")
> Reported-by: Laura Abbott <labbott@xxxxxxxxxx>
> Signed-off-by: Masahiro Yamada <yamada.masahiro@xxxxxxxxxxxxx>
> ---

Ping.
This missed the recent pull request, but
I believe this still needs to be fixed.

Thanks.

>
> Changes in v2:
> - Do not split lines
>
> arch/powerpc/mm/book3s64/hash_native.c | 2 +-
> arch/powerpc/mm/book3s64/radix_tlb.c | 32 ++++++++++++++++----------------
> 2 files changed, 17 insertions(+), 17 deletions(-)
>
> diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
> index aaa28fd..c854151 100644
> --- a/arch/powerpc/mm/book3s64/hash_native.c
> +++ b/arch/powerpc/mm/book3s64/hash_native.c
> @@ -60,7 +60,7 @@ static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
> * tlbiel instruction for hash, set invalidation
> * i.e., r=1 and is=01 or is=10 or is=11
> */
> -static inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
> +static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
> unsigned int pid,
> unsigned int ric, unsigned int prs)
> {
> diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
> index 4d84136..4d3dc10 100644
> --- a/arch/powerpc/mm/book3s64/radix_tlb.c
> +++ b/arch/powerpc/mm/book3s64/radix_tlb.c
> @@ -29,7 +29,7 @@
> * tlbiel instruction for radix, set invalidation
> * i.e., r=1 and is=01 or is=10 or is=11
> */
> -static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
> +static __always_inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
> unsigned int pid,
> unsigned int ric, unsigned int prs)
> {
> @@ -150,8 +150,8 @@ static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
> trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
> }
>
> -static inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
> - unsigned long ric)
> +static __always_inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
> + unsigned long ric)
> {
> unsigned long rb,rs,prs,r;
>
> @@ -167,8 +167,8 @@ static inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
> }
>
>
> -static inline void __tlbiel_va(unsigned long va, unsigned long pid,
> - unsigned long ap, unsigned long ric)
> +static __always_inline void __tlbiel_va(unsigned long va, unsigned long pid,
> + unsigned long ap, unsigned long ric)
> {
> unsigned long rb,rs,prs,r;
>
> @@ -183,8 +183,8 @@ static inline void __tlbiel_va(unsigned long va, unsigned long pid,
> trace_tlbie(0, 1, rb, rs, ric, prs, r);
> }
>
> -static inline void __tlbie_va(unsigned long va, unsigned long pid,
> - unsigned long ap, unsigned long ric)
> +static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,
> + unsigned long ap, unsigned long ric)
> {
> unsigned long rb,rs,prs,r;
>
> @@ -199,8 +199,8 @@ static inline void __tlbie_va(unsigned long va, unsigned long pid,
> trace_tlbie(0, 0, rb, rs, ric, prs, r);
> }
>
> -static inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
> - unsigned long ap, unsigned long ric)
> +static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
> + unsigned long ap, unsigned long ric)
> {
> unsigned long rb,rs,prs,r;
>
> @@ -239,7 +239,7 @@ static inline void fixup_tlbie_lpid(unsigned long lpid)
> /*
> * We use 128 set in radix mode and 256 set in hpt mode.
> */
> -static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
> +static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
> {
> int set;
>
> @@ -341,7 +341,7 @@ static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
> asm volatile("eieio; tlbsync; ptesync": : :"memory");
> }
>
> -static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
> +static __always_inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
> {
> int set;
>
> @@ -381,8 +381,8 @@ static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
> __tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
> }
>
> -static inline void _tlbiel_va(unsigned long va, unsigned long pid,
> - unsigned long psize, unsigned long ric)
> +static __always_inline void _tlbiel_va(unsigned long va, unsigned long pid,
> + unsigned long psize, unsigned long ric)
> {
> unsigned long ap = mmu_get_ap(psize);
>
> @@ -413,8 +413,8 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end,
> __tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
> }
>
> -static inline void _tlbie_va(unsigned long va, unsigned long pid,
> - unsigned long psize, unsigned long ric)
> +static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
> + unsigned long psize, unsigned long ric)
> {
> unsigned long ap = mmu_get_ap(psize);
>
> @@ -424,7 +424,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,
> asm volatile("eieio; tlbsync; ptesync": : :"memory");
> }
>
> -static inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
> +static __always_inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
> unsigned long psize, unsigned long ric)
> {
> unsigned long ap = mmu_get_ap(psize);
> --
> 2.7.4
>


--
Best Regards
Masahiro Yamada