[PATCH tip] x86/percpu: Use C for arch_raw_cpu_ptr()
From: Uros Bizjak
Date: Mon Oct 09 2023 - 16:31:49 EST
Implementing arch_raw_cpu_ptr() in C allows the compiler to perform
better optimizations, such as setting an appropriate base to compute
the address instead of using a separate add instruction.
E.g., the address calculation in amd_pmu_enable_virt() improves from:
48 c7 c0 00 00 00 00 mov $0x0,%rax
87b7: R_X86_64_32S cpu_hw_events
65 48 03 05 00 00 00 add %gs:0x0(%rip),%rax
00
87bf: R_X86_64_PC32 this_cpu_off-0x4
48 c7 80 28 13 00 00 movq $0x0,0x1328(%rax)
00 00 00 00
to:
65 48 8b 05 00 00 00 mov %gs:0x0(%rip),%rax
00
8798: R_X86_64_PC32 this_cpu_off-0x4
48 c7 80 00 00 00 00 movq $0x0,0x0(%rax)
00 00 00 00
87a6: R_X86_64_32S cpu_hw_events+0x1328
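For reference, a minimal stand-alone sketch (not part of the patch; the
struct and variable names other than this_cpu_off are illustrative, and
it assumes a GCC/clang x86-64 target with __seg_gs named-address-space
support and the default small, non-PIE code model) of why the C form
lets the compiler fold the per-CPU offset into the addressing mode:

/* percpu-sketch.c: compile with "gcc -O2 -S" and inspect the asm */

/* Per-CPU base, addressed as %gs:this_cpu_off by the hardware. */
extern unsigned long __seg_gs this_cpu_off;

/* Illustrative per-CPU object; pad puts "enabled" at offset 0x1328. */
struct hw_events {
	unsigned long pad[0x265];
	unsigned long enabled;
};
extern struct hw_events events;

/* C version of arch_raw_cpu_ptr(): a plain load plus an add. */
static inline void *cpu_ptr(void *ptr)
{
	return (void *)(this_cpu_off + (unsigned long)ptr);
}

void disable(void)
{
	/*
	 * The compiler sees ordinary integer arithmetic, so it can use
	 * the loaded per-CPU base as the base register and fold the
	 * symbol address and field offset into the displacement,
	 * roughly:
	 *	mov  %gs:this_cpu_off(%rip),%rax
	 *	movq $0x0,events+0x1328(%rax)
	 * instead of materializing the address with a separate add.
	 */
	((struct hw_events *)cpu_ptr(&events))->enabled = 0;
}

With the asm-based arch_raw_cpu_ptr(), the offset addition is hidden
inside the asm template, so the compiler cannot perform this folding.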
Co-developed-by: Nadav Amit <namit@xxxxxxxxxx>
Signed-off-by: Nadav Amit <namit@xxxxxxxxxx>
Signed-off-by: Uros Bizjak <ubizjak@xxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Brian Gerst <brgerst@xxxxxxxxx>
Cc: Denys Vlasenko <dvlasenk@xxxxxxxxxx>
Cc: H. Peter Anvin <hpa@xxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
---
arch/x86/include/asm/percpu.h | 23 ++++++++++++++++++-----
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 60ea7755c0fe..aa48c061abd9 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -39,16 +39,23 @@
#define __percpu_prefix ""
+/*
+ * Efficient implementation for cases in which the compiler supports
+ * named address spaces. Allows the compiler to perform additional
+ * optimizations that can save more instructions.
+ */
+#define arch_raw_cpu_ptr(ptr) \
+({ \
+ unsigned long tcp_ptr__; \
+ tcp_ptr__ = __raw_cpu_read(, this_cpu_off) + (unsigned long)(ptr); \
+ (typeof(*(ptr)) __kernel __force *)tcp_ptr__; \
+})
+
#else /* CONFIG_CC_HAS_NAMED_AS */
#define __percpu_seg_override
#define __percpu_prefix "%%"__stringify(__percpu_seg)":"
-#endif /* CONFIG_CC_HAS_NAMED_AS */
-
-#define __force_percpu_prefix "%%"__stringify(__percpu_seg)":"
-#define __my_cpu_offset this_cpu_read(this_cpu_off)
-
/*
* Compared to the generic __my_cpu_offset version, the following
* saves one instruction and avoids clobbering a temp register.
@@ -61,6 +68,12 @@
: "m" (__my_cpu_var(this_cpu_off)), "0" (ptr)); \
(typeof(*(ptr)) __kernel __force *)tcp_ptr__; \
})
+
+#endif /* CONFIG_CC_HAS_NAMED_AS */
+
+#define __force_percpu_prefix "%%"__stringify(__percpu_seg)":"
+#define __my_cpu_offset this_cpu_read(this_cpu_off)
+
#else /* CONFIG_SMP */
#define __percpu_seg_override
#define __percpu_prefix ""
--
2.41.0