[PATCH 3/5] x86_64: rename clear_page() and copy_user() variants

From: Alexey Dobriyan
Date: Wed Apr 26 2017 - 14:31:07 EST


The patch replaces marketing acronyms like ERMS and chatty function
names with consistent, shorter names derived from the instruction
that actually does the work:

clear_page_orig                -> clear_page_mov
clear_page_rep                 -> clear_page_rep_stosq
clear_page_erms                -> clear_page_rep_stosb

copy_user_generic_unrolled     -> copy_user_mov
copy_user_generic_string       -> copy_user_rep_movsq
copy_user_enhanced_fast_string -> copy_user_rep_movsb

Signed-off-by: Alexey Dobriyan <adobriyan@xxxxxxxxx>
---
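For reviewers, a user-space sketch of the three zeroing strategies the
new clear_page_*() suffixes refer to (illustrative only, not part of
the patch; the zero_page_*() names below are made up):

	#include <stddef.h>

	/* _mov: plain 64-bit stores; the kernel version unrolls
	 * this loop to 64 bytes per iteration */
	static void zero_page_mov(void *page)
	{
		unsigned long *p = page;

		for (size_t i = 0; i < 4096 / 8; i++)
			p[i] = 0;
	}

	/* _rep_stosq: one quadword store per iteration */
	static void zero_page_rep_stosq(void *page)
	{
		unsigned long cnt = 4096 / 8;

		asm volatile("rep stosq"
			     : "+D" (page), "+c" (cnt)
			     : "a" (0UL)
			     : "memory");
	}

	/* _rep_stosb: one byte store per iteration; the ERMS
	 * feature flag promises this is fast despite the looks */
	static void zero_page_rep_stosb(void *page)
	{
		unsigned long cnt = 4096;

		asm volatile("rep stosb"
			     : "+D" (page), "+c" (cnt)
			     : "a" (0UL)
			     : "memory");
	}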

arch/x86/include/asm/page_64.h    | 12 ++++++------
arch/x86/include/asm/uaccess_64.h | 18 +++++++++---------
arch/x86/lib/clear_page_64.S      | 18 +++++++++---------
arch/x86/lib/copy_user_64.S       | 20 ++++++++++----------
tools/perf/ui/browsers/annotate.c |  2 +-
5 files changed, 35 insertions(+), 35 deletions(-)
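
For context, copy_user_generic()'s selection logic, which
alternative_call_2() patches in at boot time, written as an equivalent
run-time dispatch (sketch only; cpu_has() here stands in for the real
CPU feature test):

	unsigned long copy_user_generic(void *to, const void *from,
					unsigned len)
	{
		if (cpu_has(X86_FEATURE_ERMS))		/* rep movsb is fast */
			return copy_user_rep_movsb(to, from, len);
		if (cpu_has(X86_FEATURE_REP_GOOD))	/* rep movsq is fast */
			return copy_user_rep_movsq(to, from, len);
		return copy_user_mov(to, from, len);	/* unrolled mov loop */
	}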

--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -35,15 +35,15 @@ extern unsigned long __phys_addr_symbol(unsigned long);
#define pfn_valid(pfn) ((pfn) < max_pfn)
#endif

-void clear_page_orig(void *page);
-void clear_page_rep(void *page);
-void clear_page_erms(void *page);
+void clear_page_mov(void *page);
+void clear_page_rep_stosq(void *page);
+void clear_page_rep_stosb(void *page);

static inline void clear_page(void *page)
{
- alternative_call_2(clear_page_orig,
- clear_page_rep, X86_FEATURE_REP_GOOD,
- clear_page_erms, X86_FEATURE_ERMS,
+ alternative_call_2(clear_page_mov,
+ clear_page_rep_stosq, X86_FEATURE_REP_GOOD,
+ clear_page_rep_stosb, X86_FEATURE_ERMS,
"=D" (page),
"0" (page)
: "memory", "rax", "rcx");
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -18,11 +18,11 @@

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
+copy_user_rep_movsb(void *to, const void *from, unsigned len);
__must_check unsigned long
-copy_user_generic_string(void *to, const void *from, unsigned len);
+copy_user_rep_movsq(void *to, const void *from, unsigned len);
__must_check unsigned long
-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
+copy_user_mov(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
@@ -30,14 +30,14 @@ copy_user_generic(void *to, const void *from, unsigned len)
unsigned ret;

/*
- * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
- * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
- * Otherwise, use copy_user_generic_unrolled.
+ * If CPU has ERMS feature, use copy_user_rep_movsb.
+ * Otherwise, if CPU has rep_good feature, use copy_user_rep_movsq.
+ * Otherwise, use copy_user_mov.
*/
- alternative_call_2(copy_user_generic_unrolled,
- copy_user_generic_string,
+ alternative_call_2(copy_user_mov,
+ copy_user_rep_movsq,
X86_FEATURE_REP_GOOD,
- copy_user_enhanced_fast_string,
+ copy_user_rep_movsb,
X86_FEATURE_ERMS,
ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
"=d" (len)),
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -14,15 +14,15 @@
* Zero a page.
* %rdi - page
*/
-ENTRY(clear_page_rep)
+ENTRY(clear_page_rep_stosq)
movl $4096/8,%ecx
xorl %eax,%eax
rep stosq
ret
-ENDPROC(clear_page_rep)
-EXPORT_SYMBOL_GPL(clear_page_rep)
+ENDPROC(clear_page_rep_stosq)
+EXPORT_SYMBOL_GPL(clear_page_rep_stosq)

-ENTRY(clear_page_orig)
+ENTRY(clear_page_mov)
xorl %eax,%eax
movl $4096/64,%ecx
.p2align 4
@@ -41,13 +41,13 @@ ENTRY(clear_page_orig)
jnz .Lloop
nop
ret
-ENDPROC(clear_page_orig)
-EXPORT_SYMBOL_GPL(clear_page_orig)
+ENDPROC(clear_page_mov)
+EXPORT_SYMBOL_GPL(clear_page_mov)

-ENTRY(clear_page_erms)
+ENTRY(clear_page_rep_stosb)
movl $4096,%ecx
xorl %eax,%eax
rep stosb
ret
-ENDPROC(clear_page_erms)
-EXPORT_SYMBOL_GPL(clear_page_erms)
+ENDPROC(clear_page_rep_stosb)
+EXPORT_SYMBOL_GPL(clear_page_rep_stosb)
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -17,7 +17,7 @@
#include <asm/export.h>

/*
- * copy_user_generic_unrolled - memory copy with exception handling.
+ * copy_user_mov - memory copy with exception handling.
* This version is for CPUs like P4 that don't have efficient
* microcode for rep movsq
*
@@ -29,7 +29,7 @@
* Output:
* eax uncopied bytes or 0 if successful.
*/
-ENTRY(copy_user_generic_unrolled)
+ENTRY(copy_user_mov)
ASM_STAC
cmpl $8,%edx
jb 20f /* less than 8 bytes, go to byte copy loop */
@@ -111,8 +111,8 @@ ENTRY(copy_user_generic_unrolled)
_ASM_EXTABLE(19b,40b)
_ASM_EXTABLE(21b,50b)
_ASM_EXTABLE(22b,50b)
-ENDPROC(copy_user_generic_unrolled)
-EXPORT_SYMBOL(copy_user_generic_unrolled)
+ENDPROC(copy_user_mov)
+EXPORT_SYMBOL(copy_user_mov)

/* Some CPUs run faster using the string copy instructions.
* This is also a lot simpler. Use them when possible.
@@ -132,7 +132,7 @@ EXPORT_SYMBOL(copy_user_generic_unrolled)
* Output:
* eax uncopied bytes or 0 if successful.
*/
-ENTRY(copy_user_generic_string)
+ENTRY(copy_user_rep_movsq)
ASM_STAC
cmpl $8,%edx
jb 2f /* less than 8 bytes, go to byte copy loop */
@@ -157,8 +157,8 @@ ENTRY(copy_user_generic_string)

_ASM_EXTABLE(1b,11b)
_ASM_EXTABLE(3b,12b)
-ENDPROC(copy_user_generic_string)
-EXPORT_SYMBOL(copy_user_generic_string)
+ENDPROC(copy_user_rep_movsq)
+EXPORT_SYMBOL(copy_user_rep_movsq)

/*
* Some CPUs support enhanced REP MOVSB/STOSB instructions.
@@ -172,7 +172,7 @@ EXPORT_SYMBOL(copy_user_generic_string)
* Output:
* eax uncopied bytes or 0 if successful.
*/
-ENTRY(copy_user_enhanced_fast_string)
+ENTRY(copy_user_rep_movsb)
ASM_STAC
movl %edx,%ecx
1: rep
@@ -187,8 +187,8 @@ ENTRY(copy_user_enhanced_fast_string)
.previous

_ASM_EXTABLE(1b,12b)
-ENDPROC(copy_user_enhanced_fast_string)
-EXPORT_SYMBOL(copy_user_enhanced_fast_string)
+ENDPROC(copy_user_rep_movsb)
+EXPORT_SYMBOL(copy_user_rep_movsb)

/*
* copy_user_nocache - Uncached memory copy with exception handling
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -1084,7 +1084,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
* routines that come with labels in the same column
* as the address in objdump, sigh.
*
- * E.g. copy_user_generic_unrolled
+ * E.g. copy_user_mov
*/
if (pos->offset < (s64)size)
browser.offsets[pos->offset] = pos;
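
For completeness, the core loop of copy_user_rep_movsb() above in
user-space form (sketch only; the kernel version adds STAC/CLAC around
the copy and an exception table entry so that a fault reports the
number of uncopied bytes):

	static void *copy_rep_movsb(void *to, const void *from,
				    unsigned long len)
	{
		void *ret = to;

		asm volatile("rep movsb"
			     : "+D" (to), "+S" (from), "+c" (len)
			     :
			     : "memory");
		return ret;
	}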