[PATCH 12/15] x86/static_call: Add out-of-line static call implementation

From: Peter Zijlstra
Date: Wed Jun 05 2019 - 09:27:24 EST


From: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>

Add the x86 out-of-line static call implementation. For each key, a
permanent trampoline is created which is the destination for all static
calls for the given key. The trampoline has a direct jump which gets
patched by static_call_update() when the destination function changes.
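
For illustration, a minimal usage sketch from a caller's point of view,
assuming the generic static call API introduced earlier in this series
(DEFINE_STATIC_CALL(), static_call() and static_call_update()); the key
and the functions below are made up for the example:

	/* Hypothetical functions and key, for illustration only. */
	static int func_a(int arg) { return arg + 1; }
	static int func_b(int arg) { return arg * 2; }

	DEFINE_STATIC_CALL(my_key, func_a);

	static void example(void)
	{
		int ret;

		/* Calls go through the permanent my_key trampoline to func_a. */
		ret = static_call(my_key)(1);

		/* Re-patch the trampoline's direct jump to target func_b. */
		static_call_update(my_key, func_b);
		ret = static_call(my_key)(1);
	}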

Cc: x86@xxxxxxxxxx
Cc: Steven Rostedt <rostedt@xxxxxxxxxxx>
Cc: Julia Cartwright <julia@xxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Ard Biesheuvel <ard.biesheuvel@xxxxxxxxxx>
Cc: Jason Baron <jbaron@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Jiri Kosina <jkosina@xxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Masami Hiramatsu <mhiramat@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: David Laight <David.Laight@xxxxxxxxxx>
Cc: Jessica Yu <jeyu@xxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Signed-off-by: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Link: https://lkml.kernel.org/r/00b08f2194e80241decbf206624b6580b9b8855b.1543200841.git.jpoimboe@xxxxxxxxxx
---
arch/x86/Kconfig | 1
arch/x86/include/asm/static_call.h | 28 +++++++++++++++++++++++++++
arch/x86/kernel/Makefile | 1
arch/x86/kernel/static_call.c | 38 +++++++++++++++++++++++++++++++++++++
4 files changed, 68 insertions(+)
create mode 100644 arch/x86/include/asm/static_call.h
create mode 100644 arch/x86/kernel/static_call.c

--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -198,6 +198,7 @@ config X86
 	select HAVE_FUNCTION_ARG_ACCESS_API
 	select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
 	select HAVE_STACK_VALIDATION if X86_64
+	select HAVE_STATIC_CALL
 	select HAVE_RSEQ
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UNSTABLE_SCHED_CLOCK
--- /dev/null
+++ b/arch/x86/include/asm/static_call.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_STATIC_CALL_H
+#define _ASM_STATIC_CALL_H
+
+/*
+ * Manually construct a 5-byte direct JMP to prevent the assembler from
+ * optimizing it into a 2-byte JMP.
+ */
+#define __ARCH_STATIC_CALL_JMP_LABEL(key) ".L" __stringify(key ## _after_jmp)
+#define __ARCH_STATIC_CALL_TRAMP_JMP(key, func)			\
+	".byte 0xe9 \n"							\
+	".long " #func " - " __ARCH_STATIC_CALL_JMP_LABEL(key) "\n"	\
+	__ARCH_STATIC_CALL_JMP_LABEL(key) ":"
+
+/*
+ * This is a permanent trampoline which does a direct jump to the function.
+ * The direct jump gets patched by static_call_update().
+ */
+#define ARCH_DEFINE_STATIC_CALL_TRAMP(key, func)			\
+	asm(".pushsection .text, \"ax\" \n"				\
+	    ".align 4 \n"						\
+	    ".globl " STATIC_CALL_TRAMP_STR(key) " \n"			\
+	    ".type " STATIC_CALL_TRAMP_STR(key) ", @function \n"	\
+	    STATIC_CALL_TRAMP_STR(key) ": \n"				\
+	    __ARCH_STATIC_CALL_TRAMP_JMP(key, func) " \n"		\
+	    ".popsection \n")
+
+#endif /* _ASM_STATIC_CALL_H */
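
Not part of the patch, but as an illustration of what the macros above
emit: each trampoline is just a single 5-byte near JMP, i.e. opcode 0xe9
followed by a 32-bit displacement measured from the end of the
instruction. In struct form (the struct name is hypothetical):

	struct static_call_jmp {
		u8  opcode;	/* 0xe9: JMP rel32 */
		s32 rel32;	/* target - (insn address + 5) */
	} __packed;
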
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -63,6 +63,7 @@ obj-y += tsc.o tsc_msr.o io_delay.o rt
 obj-y += pci-iommu_table.o
 obj-y += resource.o
 obj-y += irqflags.o
+obj-y += static_call.o

 obj-y += process.o
 obj-y += fpu/
--- /dev/null
+++ b/arch/x86/kernel/static_call.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/static_call.h>
+#include <linux/memory.h>
+#include <linux/bug.h>
+#include <asm/text-patching.h>
+#include <asm/nospec-branch.h>
+
+#define CALL_INSN_SIZE 5
+
+void arch_static_call_transform(void *site, void *tramp, void *func)
+{
+	unsigned char opcodes[CALL_INSN_SIZE];
+	unsigned char insn_opcode;
+	unsigned long insn;
+	s32 dest_relative;
+
+	mutex_lock(&text_mutex);
+
+	insn = (unsigned long)tramp;
+
+	insn_opcode = *(unsigned char *)insn;
+	if (insn_opcode != 0xE9) {
+		WARN_ONCE(1, "unexpected static call insn opcode 0x%x at %pS",
+			  insn_opcode, (void *)insn);
+		goto unlock;
+	}
+
+	dest_relative = (long)(func) - (long)(insn + CALL_INSN_SIZE);
+
+	opcodes[0] = insn_opcode;
+	memcpy(&opcodes[1], &dest_relative, CALL_INSN_SIZE - 1);
+
+	text_poke_bp((void *)insn, opcodes, CALL_INSN_SIZE, NULL);
+
+unlock:
+	mutex_unlock(&text_mutex);
+}
+EXPORT_SYMBOL_GPL(arch_static_call_transform);
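
As a sanity check on the displacement math above (addresses made up for
illustration): with the trampoline's JMP at 0xffffffff81000000 and the new
destination at 0xffffffff81000100, dest_relative is 0x100 - CALL_INSN_SIZE
= 0xfb, so text_poke_bp() ends up writing the bytes e9 fb 00 00 00. A
hypothetical helper expressing the same computation:

	/* Illustrative only, not part of the patch. */
	static inline s32 jmp_rel32(unsigned long insn, unsigned long func)
	{
		return (s32)(func - (insn + CALL_INSN_SIZE));
	}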