Re: [PATCH 6/7] x86/percpu: Clean up percpu_xchg_op()

From: Nick Desaulniers
Date: Mon May 18 2020 - 18:16:15 EST


On Sun, May 17, 2020 at 8:29 AM Brian Gerst <brgerst@xxxxxxxxx> wrote:
>
> The core percpu macros already have a switch on the data size, so the switch
> in the x86 code is redundant and produces more dead code.
>
> Also use appropriate types for the width of the instructions. This avoids
> errors when compiling with Clang.
>
> Signed-off-by: Brian Gerst <brgerst@xxxxxxxxx>

Reviewed-by: Nick Desaulniers <ndesaulniers@xxxxxxxxxx>
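
For anyone following along: the "switch on the data size" the changelog
refers to is in the generic layer (include/linux/percpu-defs.h), roughly
along these lines (paraphrasing from memory, not quoting the exact
upstream text):

#define __pcpu_size_call_return2(stem, variable, ...)			\
({									\
	typeof(variable) pscr2_ret__;					\
	__verify_pcpu_ptr(&(variable));					\
	switch (sizeof(variable)) {					\
	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
	default:							\
		__bad_size_call_parameter(); break;			\
	}								\
	pscr2_ret__;							\
})

#define this_cpu_xchg(pcp, nval) \
	__pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)

Only the case matching sizeof(pcp) survives, so repeating the same switch
inside the arch macro just gives the compiler more dead code to throw
away, as the changelog says.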

> ---
> arch/x86/include/asm/percpu.h | 61 +++++++++++------------------------
> 1 file changed, 18 insertions(+), 43 deletions(-)
>
> diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
> index ac8c391a190e..3c95ab3c99cd 100644
> --- a/arch/x86/include/asm/percpu.h
> +++ b/arch/x86/include/asm/percpu.h
> @@ -215,46 +215,21 @@ do { \
> * expensive due to the implied lock prefix. The processor cannot prefetch
> * cachelines if xchg is used.
> */
> -#define percpu_xchg_op(qual, var, nval) \
> +#define percpu_xchg_op(size, qual, _var, _nval) \
> ({ \
> - typeof(var) pxo_ret__; \
> - typeof(var) pxo_new__ = (nval); \
> - switch (sizeof(var)) { \
> - case 1: \
> - asm qual ("\n\tmov "__percpu_arg(1)",%%al" \
> - "\n1:\tcmpxchgb %2, "__percpu_arg(1) \
> - "\n\tjnz 1b" \
> - : "=&a" (pxo_ret__), "+m" (var) \
> - : "q" (pxo_new__) \
> - : "memory"); \
> - break; \
> - case 2: \
> - asm qual ("\n\tmov "__percpu_arg(1)",%%ax" \
> - "\n1:\tcmpxchgw %2, "__percpu_arg(1) \
> - "\n\tjnz 1b" \
> - : "=&a" (pxo_ret__), "+m" (var) \
> - : "r" (pxo_new__) \
> - : "memory"); \
> - break; \
> - case 4: \
> - asm qual ("\n\tmov "__percpu_arg(1)",%%eax" \
> - "\n1:\tcmpxchgl %2, "__percpu_arg(1) \
> - "\n\tjnz 1b" \
> - : "=&a" (pxo_ret__), "+m" (var) \
> - : "r" (pxo_new__) \
> - : "memory"); \
> - break; \
> - case 8: \
> - asm qual ("\n\tmov "__percpu_arg(1)",%%rax" \
> - "\n1:\tcmpxchgq %2, "__percpu_arg(1) \
> - "\n\tjnz 1b" \
> - : "=&a" (pxo_ret__), "+m" (var) \
> - : "r" (pxo_new__) \
> - : "memory"); \
> - break; \
> - default: __bad_percpu_size(); \
> - } \
> - pxo_ret__; \
> + __pcpu_type_##size pxo_old__; \
> + __pcpu_type_##size pxo_new__ = __pcpu_cast_##size(_nval); \
> + asm qual (__pcpu_op2_##size("mov", __percpu_arg([var]), \
> + "%[oval]") \
> + "\n1:\t" \
> + __pcpu_op2_##size("cmpxchg", "%[nval]", \
> + __percpu_arg([var])) \
> + "\n\tjnz 1b" \
> + : [oval] "=&a" (pxo_old__), \
> + [var] "+m" (_var) \
> + : [nval] __pcpu_reg_##size(, pxo_new__) \
> + : "memory"); \
> + (typeof(_var))(unsigned long) pxo_old__; \
> })
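
To make the expansion concrete for other reviewers: the size-suffixed
helpers this relies on come from patch 1 of the series. From memory they
look roughly like the below (only the 1- and 4-byte variants shown; the
2- and 8-byte ones follow the same pattern, and the exact masks and
constraints may differ slightly):

#define __pcpu_type_1			u8
#define __pcpu_type_4			u32

#define __pcpu_cast_1(val)		((u8)(((unsigned long) val) & 0xff))
#define __pcpu_cast_4(val)		((u32)(((unsigned long) val) & 0xffffffff))

#define __pcpu_op2_1(op, src, dst)	op "b " src ", " dst
#define __pcpu_op2_4(op, src, dst)	op "l " src ", " dst

#define __pcpu_reg_1(mod, x)		mod "q" (x)	/* byte ops need a "q" reg */
#define __pcpu_reg_4(mod, x)		mod "r" (x)

So percpu_xchg_op(1, ...) still produces a u8 temporary, movb/cmpxchgb and
a "q" input constraint, and percpu_xchg_op(4, ...) a u32 temporary,
movl/cmpxchgl and "r" -- the same per-size bodies being deleted above,
just without the open-coded switch.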
>
> /*
> @@ -354,9 +329,9 @@ do { \
> #define this_cpu_or_1(pcp, val) percpu_to_op(1, volatile, "or", (pcp), val)
> #define this_cpu_or_2(pcp, val) percpu_to_op(2, volatile, "or", (pcp), val)
> #define this_cpu_or_4(pcp, val) percpu_to_op(4, volatile, "or", (pcp), val)
> -#define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(volatile, pcp, nval)
> -#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(volatile, pcp, nval)
> -#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(volatile, pcp, nval)
> +#define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(1, volatile, pcp, nval)
> +#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(2, volatile, pcp, nval)
> +#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(4, volatile, pcp, nval)
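
And from the caller's side nothing changes; e.g. (variable and function
names made up purely for illustration):

#include <linux/percpu.h>

static DEFINE_PER_CPU(u32, demo_counter);

static u32 demo_reset(void)
{
	/*
	 * sizeof(demo_counter) == 4, so the generic switch picks
	 * this_cpu_xchg_4(), which after this patch expands to
	 * percpu_xchg_op(4, volatile, demo_counter, 0).
	 */
	return this_cpu_xchg(demo_counter, 0);
}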
>
> #define raw_cpu_add_return_1(pcp, val) percpu_add_return_op(1, , pcp, val)
> #define raw_cpu_add_return_2(pcp, val) percpu_add_return_op(2, , pcp, val)
> @@ -409,7 +384,7 @@ do { \
> #define this_cpu_and_8(pcp, val) percpu_to_op(8, volatile, "and", (pcp), val)
> #define this_cpu_or_8(pcp, val) percpu_to_op(8, volatile, "or", (pcp), val)
> #define this_cpu_add_return_8(pcp, val) percpu_add_return_op(8, volatile, pcp, val)
> -#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(volatile, pcp, nval)
> +#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(8, volatile, pcp, nval)
> #define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(volatile, pcp, oval, nval)
>
> /*
> --
> 2.25.4
>


--
Thanks,
~Nick Desaulniers