[tip:x86/urgent] x86/cpufeature: Guard asm_volatile_goto usage for BPF compilation
From: tip-bot for Alexei Starovoitov
Date: Sun May 13 2018 - 15:51:57 EST
Commit-ID: b1ae32dbab50ed19cfc16d225b0fb0114fb13025
Gitweb: https://git.kernel.org/tip/b1ae32dbab50ed19cfc16d225b0fb0114fb13025
Author: Alexei Starovoitov <ast@xxxxxxxxxx>
AuthorDate: Sun, 13 May 2018 12:32:22 -0700
Committer: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
CommitDate: Sun, 13 May 2018 21:49:14 +0200
x86/cpufeature: Guard asm_volatile_goto usage for BPF compilation
Work around a build failure in BPF programs: BPF compilation utilizes kernel
headers, but clang does not support ASM GOTO and therefore fails the build.
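For context, a minimal illustration of the "asm goto" construct that the
asm_volatile_goto()-based static_cpu_has() relies on. This is a simplified
sketch, not the kernel's actual implementation, and "has_feature_sketch" is a
hypothetical name; compilers without ASM GOTO support reject this syntax at
parse time, which is why the header falls back to boot_cpu_has() below:

/*
 * Illustrative sketch only -- stand-in for the real _static_cpu_has().
 * asm goto lets inline assembly transfer control to C labels.
 */
static inline int has_feature_sketch(void)
{
	asm goto("jmp %l[no_feature]"	/* always jumps in this sketch; the kernel
					 * patches the branch via alternatives at boot */
		 :			/* no outputs */
		 :			/* no inputs in this sketch */
		 :			/* no clobbers */
		 : no_feature);		/* C labels the asm may branch to */
	return 1;
no_feature:
	return 0;
}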
Fixes: d0266046ad54 ("x86: Remove FAST_FEATURE_TESTS")
Suggested-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Alexei Starovoitov <ast@xxxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: daniel@xxxxxxxxxxxxx
Cc: peterz@xxxxxxxxxxxxx
Cc: netdev@xxxxxxxxxxxxxxx
Cc: bp@xxxxxxxxx
Cc: yhs@xxxxxx
Cc: kernel-team@xxxxxx
Cc: torvalds@xxxxxxxxxxxxxxxxxxxx
Cc: davem@xxxxxxxxxxxxx
Link: https://lkml.kernel.org/r/20180513193222.1997938-1-ast@xxxxxxxxxx
---
arch/x86/include/asm/cpufeature.h | 15 +++++++++++++++
samples/bpf/Makefile | 2 +-
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index b27da9602a6d..aced6c9290d6 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -140,6 +140,20 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
+#if defined(__clang__) && !defined(CC_HAVE_ASM_GOTO)
+
+/*
+ * Workaround for the sake of BPF compilation which utilizes kernel
+ * headers, but clang does not support ASM GOTO and fails the build.
+ */
+#ifndef __BPF_TRACING__
+#warning "Compiler lacks ASM_GOTO support. Add -D __BPF_TRACING__ to your compiler arguments"
+#endif
+
+#define static_cpu_has(bit) boot_cpu_has(bit)
+
+#else
+
/*
* Static testing of CPU features. Used the same as boot_cpu_has().
* These will statically patch the target code for additional
@@ -195,6 +209,7 @@ t_no:
boot_cpu_has(bit) : \
_static_cpu_has(bit) \
)
+#endif
#define cpu_has_bug(c, bit) cpu_has(c, (bit))
#define set_cpu_bug(c, bit) set_cpu_cap(c, (bit))
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 4d6a6edd4bf6..092947676143 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -255,7 +255,7 @@ $(obj)/tracex5_kern.o: $(obj)/syscall_nrs.h
$(obj)/%.o: $(src)/%.c
$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \
-I$(srctree)/tools/testing/selftests/bpf/ \
- -D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
+ -D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \
-D__TARGET_ARCH_$(ARCH) -Wno-compare-distinct-pointer-types \
-Wno-gnu-variable-sized-type-not-at-end \
-Wno-address-of-packed-member -Wno-tautological-compare \
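For out-of-tree BPF programs compiled with clang directly against kernel
headers (rather than through samples/bpf/Makefile), the same define has to be
added by hand. A hedged example invocation, mirroring the Makefile rule above;
the include path and file names here are hypothetical:

clang -O2 -target bpf -D__KERNEL__ -D__BPF_TRACING__ \
      -I/path/to/kernel/headers \
      -c my_bpf_prog.c -o my_bpf_prog.o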