Re: [PATCH 3/5] x86_64: rename clear_page() and copy_user() variants

From: Borislav Petkov
Date: Fri May 05 2017 - 13:01:03 EST


On Wed, Apr 26, 2017 at 09:30:47PM +0300, Alexey Dobriyan wrote:
> The patch changes market-ish acronyms like ERMS and chatty names
> to consistent and shorter versions:
>
> xxx_mov
> xxx_rep_stosq xxx_rep_movsq
> xxx_rep_stosb xxx_rep_movsb
>
> Signed-off-by: Alexey Dobriyan <adobriyan@xxxxxxxxx>
> ---
>
> arch/x86/include/asm/page_64.h | 12 ++++++------
> arch/x86/include/asm/uaccess_64.h | 18 +++++++++---------
> arch/x86/lib/clear_page_64.S | 18 +++++++++---------
> arch/x86/lib/copy_user_64.S | 20 ++++++++++----------
> tools/perf/ui/browsers/annotate.c | 2 +-
> 5 files changed, 35 insertions(+), 35 deletions(-)
>
> --- a/arch/x86/include/asm/page_64.h
> +++ b/arch/x86/include/asm/page_64.h
> @@ -35,15 +35,15 @@ extern unsigned long __phys_addr_symbol(unsigned long);
> #define pfn_valid(pfn) ((pfn) < max_pfn)
> #endif
>
> -void clear_page_orig(void *page);
> -void clear_page_rep(void *page);
> -void clear_page_erms(void *page);
> +void clear_page_mov(void *page);
> +void clear_page_rep_stosq(void *page);
> +void clear_page_rep_stosb(void *page);
>
> static inline void clear_page(void *page)
> {
> - alternative_call_2(clear_page_orig,
> - clear_page_rep, X86_FEATURE_REP_GOOD,
> - clear_page_erms, X86_FEATURE_ERMS,
> + alternative_call_2(clear_page_mov,
> + clear_page_rep_stosq, X86_FEATURE_REP_GOOD,
> + clear_page_rep_stosb, X86_FEATURE_ERMS,
> "=D" (page),
> "0" (page)
> : "memory", "rax", "rcx");
> --- a/arch/x86/include/asm/uaccess_64.h
> +++ b/arch/x86/include/asm/uaccess_64.h
> @@ -18,11 +18,11 @@
>
> /* Handles exceptions in both to and from, but doesn't do access_ok */
> __must_check unsigned long
> -copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
> +copy_user_rep_movsb(void *to, const void *from, unsigned len);

WARNING: Prefer 'unsigned int' to bare use of 'unsigned'
#62: FILE: arch/x86/include/asm/uaccess_64.h:21:
+copy_user_rep_movsb(void *to, const void *from, unsigned len);

Pls convert them while at it.

> __must_check unsigned long
> -copy_user_generic_string(void *to, const void *from, unsigned len);
> +copy_user_rep_movsq(void *to, const void *from, unsigned len);
> __must_check unsigned long
> -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
> +copy_user_mov(void *to, const void *from, unsigned len);
>
> static __always_inline __must_check unsigned long
> copy_user_generic(void *to, const void *from, unsigned len)
> @@ -30,14 +30,14 @@ copy_user_generic(void *to, const void *from, unsigned len)
> unsigned ret;
>
> /*
> - * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
> - * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
> - * Otherwise, use copy_user_generic_unrolled.
> + * If CPU has ERMS feature, use copy_user_rep_movsb.
> + * Otherwise, if CPU has rep_good feature, use copy_user_rep_movsq.

REP_GOOD, while you're at it. Also, end function names with ().

> + * Otherwise, use copy_user_mov.
> */
> - alternative_call_2(copy_user_generic_unrolled,
> - copy_user_generic_string,
> + alternative_call_2(copy_user_mov,
> + copy_user_rep_movsq,
> X86_FEATURE_REP_GOOD,
> - copy_user_enhanced_fast_string,
> + copy_user_rep_movsb,
> X86_FEATURE_ERMS,
> ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
> "=d" (len)),
> --- a/arch/x86/lib/clear_page_64.S
> +++ b/arch/x86/lib/clear_page_64.S
> @@ -14,15 +14,15 @@
> * Zero a page.
> * %rdi - page
> */
> -ENTRY(clear_page_rep)
> +ENTRY(clear_page_rep_stosq)
> movl $4096/8,%ecx
> xorl %eax,%eax
> rep stosq
> ret
> -ENDPROC(clear_page_rep)
> -EXPORT_SYMBOL_GPL(clear_page_rep)
> +ENDPROC(clear_page_rep_stosq)
> +EXPORT_SYMBOL_GPL(clear_page_rep_stosq)
>
> -ENTRY(clear_page_orig)
> +ENTRY(clear_page_mov)
> xorl %eax,%eax
> movl $4096/64,%ecx
> .p2align 4
> @@ -41,13 +41,13 @@ ENTRY(clear_page_orig)
> jnz .Lloop
> nop
> ret
> -ENDPROC(clear_page_orig)
> -EXPORT_SYMBOL_GPL(clear_page_orig)
> +ENDPROC(clear_page_mov)
> +EXPORT_SYMBOL_GPL(clear_page_mov)

Same issue as with the previous patch: _orig was dumb but since you're
changing the names, pls change them to something more descriptive.

> -ENTRY(clear_page_erms)
> +ENTRY(clear_page_rep_stosb)
> movl $4096,%ecx
> xorl %eax,%eax
> rep stosb
> ret
> -ENDPROC(clear_page_erms)
> -EXPORT_SYMBOL_GPL(clear_page_erms)
> +ENDPROC(clear_page_rep_stosb)
> +EXPORT_SYMBOL_GPL(clear_page_rep_stosb)
> --- a/arch/x86/lib/copy_user_64.S
> +++ b/arch/x86/lib/copy_user_64.S
> @@ -17,7 +17,7 @@
> #include <asm/export.h>
>
> /*
> - * copy_user_generic_unrolled - memory copy with exception handling.
> + * copy_user_mov - memory copy with exception handling.

This rename is actually losing information from the function name:
"generic_unrolled" explains exactly what the function does.

> * This version is for CPUs like P4 that don't have efficient micro
> * code for rep movsq
> *

--
Regards/Gruss,
Boris.

Good mailing practices for 400: avoid top-posting and trim the reply.