[PATCH 4.19 002/346] x86/lib: Change .weak to SYM_FUNC_START_WEAK for arch/x86/lib/mem*_64.S
From: Greg Kroah-Hartman
Date: Mon Dec 28 2020 - 11:14:20 EST
From: Fangrui Song <maskray@xxxxxxxxxx>
commit 4d6ffa27b8e5116c0abb318790fd01d4e12d75e6 upstream.
Commit

  393f203f5fd5 ("x86_64: kasan: add interceptors for memset/memmove/memcpy functions")

added .weak directives to arch/x86/lib/mem*_64.S instead of changing the
existing ENTRY macros to WEAK. This can lead to the assembly snippet

  .weak memcpy
  ...
  .globl memcpy

which will produce a STB_WEAK memcpy with GNU as but STB_GLOBAL memcpy
with LLVM's integrated assembler before LLVM 12. LLVM 12 (since
https://reviews.llvm.org/D90108) will error on such an overridden symbol
binding.
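
A minimal standalone reproducer of the divergence (a sketch only; the file
name demo.s is made up and not part of the patch):

	.text
	.weak	memcpy		# binding set by the KASAN interceptor patch
	.globl	memcpy		# re-declared by the existing ENTRY() macro
memcpy:
	ret

Assembling demo.s with GNU as and inspecting it with "readelf -s" shows
memcpy as WEAK, while LLVM's integrated assembler before LLVM 12 keeps the
later .globl and emits it as GLOBAL; LLVM 12 errors out instead.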
Commit

  ef1e03152cb0 ("x86/asm: Make some functions local")

changed ENTRY in arch/x86/lib/memcpy_64.S to SYM_FUNC_START_LOCAL, which
was ineffective due to the preceding .weak directive.

Use the appropriate SYM_FUNC_START_WEAK instead.
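
On this 4.19 tree the SYM_* macros are not available (see the backport note
below), so the hunks open-code what SYM_FUNC_START_WEAK provides upstream.
Roughly, a sketch of the equivalent directive sequence (the exact alignment
directive may differ between trees):

	.weak	memcpy
	.p2align 4, 0x90
memcpy: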
Fixes: 393f203f5fd5 ("x86_64: kasan: add interceptors for memset/memmove/memcpy functions")
Fixes: ef1e03152cb0 ("x86/asm: Make some functions local")
Reported-by: Sami Tolvanen <samitolvanen@xxxxxxxxxx>
Signed-off-by: Fangrui Song <maskray@xxxxxxxxxx>
Signed-off-by: Borislav Petkov <bp@xxxxxxx>
Reviewed-by: Nick Desaulniers <ndesaulniers@xxxxxxxxxx>
Tested-by: Nathan Chancellor <natechancellor@xxxxxxxxx>
Tested-by: Nick Desaulniers <ndesaulniers@xxxxxxxxxx>
Cc: <stable@xxxxxxxxxxxxxxx>
Link: https://lkml.kernel.org/r/20201103012358.168682-1-maskray@xxxxxxxxxx
[nd: backport due to missing
commit e9b9d020c487 ("x86/asm: Annotate aliases")
commit ffedeeb780dc ("linkage: Introduce new macros for assembler symbols")]
Signed-off-by: Nick Desaulniers <ndesaulniers@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
arch/x86/lib/memcpy_64.S | 6 +++---
arch/x86/lib/memmove_64.S | 4 ++--
arch/x86/lib/memset_64.S | 6 +++---
3 files changed, 8 insertions(+), 8 deletions(-)
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -14,8 +14,6 @@
* to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
*/
-.weak memcpy
-
/*
* memcpy - Copy a memory block.
*
@@ -28,7 +26,9 @@
* rax original destination
*/
ENTRY(__memcpy)
-ENTRY(memcpy)
+.weak memcpy
+.p2align 4, 0x90
+memcpy:
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
"jmp memcpy_erms", X86_FEATURE_ERMS
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -25,8 +25,8 @@
* rax: dest
*/
.weak memmove
-
-ENTRY(memmove)
+.p2align 4, 0x90
+memmove:
ENTRY(__memmove)
/* Handle more 32 bytes in loop */
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -6,8 +6,6 @@
#include <asm/alternative-asm.h>
#include <asm/export.h>
-.weak memset
-
/*
* ISO C memset - set a memory block to a byte value. This function uses fast
* string to get better performance than the original function. The code is
@@ -19,7 +17,9 @@
*
* rax original destination
*/
-ENTRY(memset)
+.weak memset
+.p2align 4, 0x90
+memset:
ENTRY(__memset)
/*
* Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended