[PATCH 3/7] x86/percpu: Clean up percpu_from_op()

From: Brian Gerst
Date: Sun May 17 2020 - 11:29:40 EST


The core percpu macros already have a switch on the data size, so the switch
in the x86 code is redundant and produces more dead code than necessary.
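
For reference, the generic dispatch in question lives in
include/linux/percpu-defs.h and looks roughly like this (a sketch, not
the verbatim upstream text):

#define __pcpu_size_call_return(stem, variable) \
({ \
	typeof(variable) pscr_ret__; \
	__verify_pcpu_ptr(&(variable)); \
	switch (sizeof(variable)) { \
	case 1: pscr_ret__ = stem##1(variable); break; \
	case 2: pscr_ret__ = stem##2(variable); break; \
	case 4: pscr_ret__ = stem##4(variable); break; \
	case 8: pscr_ret__ = stem##8(variable); break; \
	default: __bad_size_call_parameter(); break; \
	} \
	pscr_ret__; \
})

So raw_cpu_read(pcp) already selects raw_cpu_read_1/2/4/8 by operand
size; a second size switch inside each raw_cpu_read_N() only adds
branches the compiler has to prove dead and discard.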

Also use types that match the operand width of each instruction. This avoids
errors when compiling with Clang.
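
The __pcpu_type_##size, __pcpu_op2_##size and __pcpu_reg_##size helpers
used below were introduced earlier in this series; a minimal sketch of
the shape they are assumed to have:

/* C type matching each operand size */
#define __pcpu_type_1 u8
#define __pcpu_type_2 u16
#define __pcpu_type_4 u32
#define __pcpu_type_8 u64

/* Paste the width suffix onto the instruction mnemonic */
#define __pcpu_op2_1(op, src, dst) op "b " src ", " dst
#define __pcpu_op2_2(op, src, dst) op "w " src ", " dst
#define __pcpu_op2_4(op, src, dst) op "l " src ", " dst
#define __pcpu_op2_8(op, src, dst) op "q " src ", " dst

/* Constraint: byte operands need a byte-addressable register ("q") */
#define __pcpu_reg_1(mod, x) mod "q" (x)
#define __pcpu_reg_2(mod, x) mod "r" (x)
#define __pcpu_reg_4(mod, x) mod "r" (x)
#define __pcpu_reg_8(mod, x) mod "r" (x)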

Signed-off-by: Brian Gerst <brgerst@xxxxxxxxx>
---
arch/x86/include/asm/percpu.h | 50 +++++++++++------------------------
1 file changed, 15 insertions(+), 35 deletions(-)

diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 233c7a78d1a6..93f33202492d 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -190,33 +190,13 @@ do { \
} \
} while (0)

-#define percpu_from_op(qual, op, var) \
-({ \
- typeof(var) pfo_ret__; \
- switch (sizeof(var)) { \
- case 1: \
- asm qual (op "b "__percpu_arg(1)",%0" \
- : "=q" (pfo_ret__) \
- : "m" (var)); \
- break; \
- case 2: \
- asm qual (op "w "__percpu_arg(1)",%0" \
- : "=r" (pfo_ret__) \
- : "m" (var)); \
- break; \
- case 4: \
- asm qual (op "l "__percpu_arg(1)",%0" \
- : "=r" (pfo_ret__) \
- : "m" (var)); \
- break; \
- case 8: \
- asm qual (op "q "__percpu_arg(1)",%0" \
- : "=r" (pfo_ret__) \
- : "m" (var)); \
- break; \
- default: __bad_percpu_size(); \
- } \
- pfo_ret__; \
+#define percpu_from_op(size, qual, op, _var) \
+({ \
+ __pcpu_type_##size pfo_val__; \
+ asm qual (__pcpu_op2_##size(op, __percpu_arg([var]), "%[val]") \
+ : [val] __pcpu_reg_##size("=", pfo_val__) \
+ : [var] "m" (_var)); \
+ (typeof(_var))(unsigned long) pfo_val__; \
})

#define percpu_stable_op(op, var) \
@@ -401,9 +381,9 @@ do { \
*/
#define this_cpu_read_stable(var) percpu_stable_op("mov", var)

-#define raw_cpu_read_1(pcp) percpu_from_op(, "mov", pcp)
-#define raw_cpu_read_2(pcp) percpu_from_op(, "mov", pcp)
-#define raw_cpu_read_4(pcp) percpu_from_op(, "mov", pcp)
+#define raw_cpu_read_1(pcp) percpu_from_op(1, , "mov", pcp)
+#define raw_cpu_read_2(pcp) percpu_from_op(2, , "mov", pcp)
+#define raw_cpu_read_4(pcp) percpu_from_op(4, , "mov", pcp)

#define raw_cpu_write_1(pcp, val) percpu_to_op(1, , "mov", (pcp), val)
#define raw_cpu_write_2(pcp, val) percpu_to_op(2, , "mov", (pcp), val)
@@ -433,9 +413,9 @@ do { \
#define raw_cpu_xchg_2(pcp, val) raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val) raw_percpu_xchg_op(pcp, val)

-#define this_cpu_read_1(pcp) percpu_from_op(volatile, "mov", pcp)
-#define this_cpu_read_2(pcp) percpu_from_op(volatile, "mov", pcp)
-#define this_cpu_read_4(pcp) percpu_from_op(volatile, "mov", pcp)
+#define this_cpu_read_1(pcp) percpu_from_op(1, volatile, "mov", pcp)
+#define this_cpu_read_2(pcp) percpu_from_op(2, volatile, "mov", pcp)
+#define this_cpu_read_4(pcp) percpu_from_op(4, volatile, "mov", pcp)
#define this_cpu_write_1(pcp, val) percpu_to_op(1, volatile, "mov", (pcp), val)
#define this_cpu_write_2(pcp, val) percpu_to_op(2, volatile, "mov", (pcp), val)
#define this_cpu_write_4(pcp, val) percpu_to_op(4, volatile, "mov", (pcp), val)
@@ -488,7 +468,7 @@ do { \
* 32 bit must fall back to generic operations.
*/
#ifdef CONFIG_X86_64
-#define raw_cpu_read_8(pcp) percpu_from_op(, "mov", pcp)
+#define raw_cpu_read_8(pcp) percpu_from_op(8, , "mov", pcp)
#define raw_cpu_write_8(pcp, val) percpu_to_op(8, , "mov", (pcp), val)
#define raw_cpu_add_8(pcp, val) percpu_add_op(, (pcp), val)
#define raw_cpu_and_8(pcp, val) percpu_to_op(8, , "and", (pcp), val)
@@ -497,7 +477,7 @@ do { \
#define raw_cpu_xchg_8(pcp, nval) raw_percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(, pcp, oval, nval)

-#define this_cpu_read_8(pcp) percpu_from_op(volatile, "mov", pcp)
+#define this_cpu_read_8(pcp) percpu_from_op(8, volatile, "mov", pcp)
#define this_cpu_write_8(pcp, val) percpu_to_op(8, volatile, "mov", (pcp), val)
#define this_cpu_add_8(pcp, val) percpu_add_op(volatile, (pcp), val)
#define this_cpu_and_8(pcp, val) percpu_to_op(8, volatile, "and", (pcp), val)
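
As an illustration of the result, this_cpu_read_1(pcp) should now
expand to roughly:

({
	u8 pfo_val__;
	asm volatile ("movb " __percpu_arg([var]) ", %[val]"
		      : [val] "=q" (pfo_val__)
		      : [var] "m" (pcp));
	(typeof(pcp))(unsigned long) pfo_val__;
})

The output operand's C type (u8) always matches the instruction width
("movb"), so Clang no longer sees operand-size mismatches in branches
GCC would have silently discarded as dead.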
--
2.25.4