[PATCH v5] arm64: implement ftrace with regs

From: Torsten Duwe
Date: Fri Nov 30 2018 - 11:20:20 EST


Use -fpatchable-function-entry (gcc8) to add 2 NOPs at the beginning
of each function. Replace the first NOP thus generated with a quick LR
saver (a move to the scratch register x9), so that the second
replacement instruction, the call into ftrace, does not clobber the
value. Ftrace will then generate the standard stack frames.
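
To illustrate, with tracing activated, a function's entry will then
look roughly like this (which trampoline is branched to depends on
the call site, see ftrace_make_call() below):

	func:
		mov	x9, x30			// was: nop (quick LR saver)
		bl	ftrace_regs_caller	// was: nop
		...				// regular prologue and body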

Note that patchable-function-entry in GCC disables IPA-RA, which means
ABI register calling conventions are obeyed *and* scratch registers
such as x9 are available.

Introduce and handle an ftrace_regs_trampoline for module PLTs,
alongside the existing ftrace_trampoline, and double the size of the
special .text.ftrace_trampoline section when it is present in the
module.
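
The two entries of the enlarged section map to the two possible
branch targets (a sketch; see install_ftrace_trampoline() below):

	ftrace_trampolines[0] -> ftrace_caller       (FTRACE_ADDR)
	ftrace_trampolines[1] -> ftrace_regs_caller  (FTRACE_REGS_ADDR)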

Signed-off-by: Torsten Duwe <duwe@xxxxxxx>

---

As reliable stack traces are still being discussed, here is
dynamic ftrace with regs only, which has value of its own.
I can see Mark has broken an earlier version down into smaller
patches; just let me know if you prefer that.

[Changes from v4]

* include Ard's feedback and pending changes:
- ADR/ADRP veneers
- ftrace_trampolines[2]
- add a .req for fp, just in case
- comment on the pt_regs.stackframe[2] mapping
* include Mark's ftrace cleanup
- GLOBAL()
- prepare_ftrace_return(pc, &lr, fp)

---
arch/arm64/Kconfig | 2
arch/arm64/Makefile | 4 +
arch/arm64/include/asm/assembler.h | 1
arch/arm64/include/asm/ftrace.h | 13 +++
arch/arm64/include/asm/module.h | 3
arch/arm64/kernel/Makefile | 6 -
arch/arm64/kernel/entry-ftrace.S | 130 ++++++++++++++++++++++++++++++++++
arch/arm64/kernel/ftrace.c | 125 +++++++++++++++++++++++++-------
arch/arm64/kernel/module-plts.c | 3
arch/arm64/kernel/module.c | 2
drivers/firmware/efi/libstub/Makefile | 3
include/asm-generic/vmlinux.lds.h | 1
include/linux/compiler_types.h | 4 +
13 files changed, 261 insertions(+), 36 deletions(-)
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -125,6 +125,8 @@ config ARM64
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS \
+ if $(cc-option,-fpatchable-function-entry=2)
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_TRACER
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -79,6 +79,10 @@ ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/arm64/kernel/module.lds
endif

+ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y)
+ CC_FLAGS_FTRACE := -fpatchable-function-entry=2
+endif
+
# Default value
head-y := arch/arm64/kernel/head.o

--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -17,6 +17,19 @@
#define MCOUNT_ADDR ((unsigned long)_mcount)
#define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE

+/*
+ * DYNAMIC_FTRACE_WITH_REGS is implemented by adding 2 NOPs at the beginning
+ * of each function, with the second NOP actually calling ftrace. In contrast
+ * to a classic _mcount call, the instruction to be modified is thus the
+ * second one, not the only one.
+ */
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#define REC_IP_BRANCH_OFFSET AARCH64_INSN_SIZE
+#else
+#define REC_IP_BRANCH_OFFSET 0
+#endif
+
#ifndef __ASSEMBLY__
#include <linux/compat.h>

--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -7,9 +7,9 @@ CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(
AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
CFLAGS_armv8_deprecated.o := -I$(src)

-CFLAGS_REMOVE_ftrace.o = -pg
-CFLAGS_REMOVE_insn.o = -pg
-CFLAGS_REMOVE_return_address.o = -pg
+CFLAGS_REMOVE_ftrace.o = -pg $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_insn.o = -pg $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_return_address.o = -pg $(CC_FLAGS_FTRACE)

# Object file lists.
arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -13,7 +13,8 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__K

# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
# disable the stackleak plugin
-cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie \
+cflags-$(CONFIG_ARM64) := $(filter-out -pg $(CC_FLAGS_FTRACE)\
+ ,$(KBUILD_CFLAGS)) -fpie \
$(DISABLE_STACKLEAK_PLUGIN)
cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
-fno-builtin -fpic -mno-single-pic-base
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -13,6 +13,8 @@
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
+#include <asm/asm-offsets.h>

/*
* Gcc with -pg will put the following code in the beginning of each function:
@@ -122,6 +124,7 @@ skip_ftrace_call: // }
ENDPROC(_mcount)

#else /* CONFIG_DYNAMIC_FTRACE */
+#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/*
* _mcount() is used to build the kernel with -pg option, but all the branch
* instructions to _mcount() are replaced to NOP initially at kernel start up,
@@ -159,6 +162,124 @@ GLOBAL(ftrace_graph_call) // ftrace_gra

mcount_exit
ENDPROC(ftrace_caller)
+#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+
+/*
+ * Since no -pg or similar compiler flag is used, there should be no
+ * actual call to _mcount anywhere; only some value for MCOUNT_ADDR is
+ * needed for comparison. Let it point here, to a magic value of sorts
+ * that can be recognised when debugging.
+ */
+GLOBAL(_mcount)
+ ret /* make it differ from regs caller */
+
+ENTRY(ftrace_regs_caller)
+ /* callee's preliminary stack frame: */
+ stp fp, x9, [sp, #-16]!
+ mov fp, sp
+
+ /* our stack frame: */
+ stp fp, lr, [sp, #-S_FRAME_SIZE]!
+ add x9, sp, #16 /* offset to pt_regs */
+
+ /*
+ * Along with the _common code below, dump the complete
+ * register set for inspection.
+ */
+ stp x10, x11, [x9, #S_X10]
+ stp x12, x13, [x9, #S_X12]
+ stp x14, x15, [x9, #S_X14]
+ stp x16, x17, [x9, #S_X16]
+ stp x18, x19, [x9, #S_X18]
+ stp x20, x21, [x9, #S_X20]
+ stp x22, x23, [x9, #S_X22]
+ stp x24, x25, [x9, #S_X24]
+ stp x26, x27, [x9, #S_X26]
+
+ b ftrace_common
+ENDPROC(ftrace_regs_caller)
+
+ENTRY(ftrace_caller)
+ /* callee's preliminary stack frame: */
+ stp fp, x9, [sp, #-16]!
+ mov fp, sp
+
+ /* our stack frame: */
+ stp fp, lr, [sp, #-S_FRAME_SIZE]!
+ add x9, sp, #16 /* offset to pt_regs */
+
+ftrace_common:
+ /*
+ * At this point we have 2 new stack frames, and x9 pointing
+ * at a pt_regs which we can populate as needed. At least the
+ * argument registers need to be preserved, see
+ * ftrace_common_return below. pt_regs at x9 is layed out so
+ * that pt_regs.stackframe[] (last 16 bytes) maps to the
+ * preliminary frame we created for the callee.
+ */
+
+ /* save function arguments */
+ stp x0, x1, [x9]
+ stp x2, x3, [x9, #S_X2]
+ stp x4, x5, [x9, #S_X4]
+ stp x6, x7, [x9, #S_X6]
+ str x8, [x9, #S_X8]
+
+ ldr x0, [fp]
+ stp x28, x0, [x9, #S_X28] /* "our" x28 + the callee's FP in pt_regs */
+
+ /* The program counter just after the ftrace call site */
+ str lr, [x9, #S_PC]
+ /* The stack pointer as it was on ftrace_caller entry... */
+ add x28, fp, #16
+ str x28, [x9, #S_SP]
+ /* The link register at callee entry */
+ ldr x28, [fp, #8]
+ str x28, [x9, #S_LR] /* to pt_regs.regs[30] */
+
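+ /* tracer arguments: x0 = ip, x1 = parent_ip, x2 = ops, x3 = pt_regs */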
+ ldr_l x2, function_trace_op, x0
+ ldr x1, [fp, #8]
+ sub x0, lr, #8 /* function entry == IP */
+ mov x3, x9 /* pt_regs are @x9 */
+
+ mov fp, sp
+
+GLOBAL(ftrace_call)
+ bl ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
+ nop // If enabled, this will be replaced
+ // "b ftrace_graph_caller"
+#endif
+
+/*
+ * GCC's patchable-function-entry implicitly disables IPA-RA,
+ * so all non-argument registers are either scratch / dead
+ * or callee-saved (within the ftrace framework). The arguments
+ * of the call we are intercepting right now, however, need to be
+ * preserved in any case.
+ */
+ftrace_common_return:
+ add x9, sp, #16 /* advance to pt_regs for restore */
+
+ ldp x0, x1, [x9]
+ ldp x2, x3, [x9, #S_X2]
+ ldp x4, x5, [x9, #S_X4]
+ ldp x6, x7, [x9, #S_X6]
+ ldr x8, [x9, #S_X8]
+
+ ldp x28, fp, [x9, #S_X28]
+
+ ldr lr, [x9, #S_LR]
+ ldr x9, [x9, #S_PC]
+ /* clean up both frames, ours and the callee's preliminary one */
+ add sp, sp, #S_FRAME_SIZE + 16
+
+ ret x9
+
+ENDPROC(ftrace_caller)
+
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
#endif /* CONFIG_DYNAMIC_FTRACE */

ENTRY(ftrace_stub)
@@ -176,12 +297,21 @@ ENDPROC(ftrace_stub)
* and run return_to_handler() later on its exit.
*/
ENTRY(ftrace_graph_caller)
+#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
mcount_get_pc x0 // function's pc
mcount_get_lr_addr x1 // pointer to function's saved lr
mcount_get_parent_fp x2 // parent's fp
bl prepare_ftrace_return // prepare_ftrace_return(pc, &lr, fp)

mcount_exit
+#else
+ add x9, sp, #16 /* advance to pt_regs to gather args */
+ ldr x0, [x9, #S_PC] /* pc */
+ add x1, x9, #S_LR /* &lr */
+ ldr x2, [x9, #S_STACKFRAME] /* fp */
+ bl prepare_ftrace_return
+ b ftrace_common_return
+#endif
ENDPROC(ftrace_graph_caller)

/*
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -65,18 +65,66 @@ int ftrace_update_ftrace_func(ftrace_fun
return ftrace_modify_code(pc, 0, new, false);
}

+#ifdef CONFIG_ARM64_MODULE_PLTS
+static int install_ftrace_trampoline(struct module *mod, unsigned long *addr)
+{
+ struct plt_entry trampoline, *mod_trampoline;
+
+ /* Select the matching entry from
+ * mod->arch.ftrace_trampolines[MOD_ARCH_NR_FTRACE_TRAMPOLINES];
+ * this is where each well-known ftrace entry point is assigned
+ * its trampoline.
+ */
+ if (*addr == FTRACE_ADDR)
+ mod_trampoline = &mod->arch.ftrace_trampolines[0];
+ else if (*addr == FTRACE_REGS_ADDR)
+ mod_trampoline = &mod->arch.ftrace_trampolines[1];
+ else
+ return -EINVAL;
+
+ trampoline = get_plt_entry(*addr, mod_trampoline);
+
+ if (!plt_entries_equal(mod_trampoline, &trampoline)) {
+
+ /* point the trampoline at our ftrace entry point */
+ module_disable_ro(mod);
+ *mod_trampoline = trampoline;
+ module_enable_ro(mod, true);
+
+ /* update trampoline before patching in the branch */
+ smp_wmb();
+ }
+ *addr = (unsigned long)(void *)mod_trampoline;
+
+ return 0;
+}
+#endif
+
+/*
+ * Ftrace with regs generates the tracer calls as close as possible to
+ * the function entry; no stack frame has been set up at that point.
+ * In order to make another call, e.g. to ftrace_caller, the LR must be
+ * saved before it gets overwritten.
+ * Right at function entry, and with IPA-RA turned off, the scratch
+ * registers are available, so move the LR to x9 before calling into
+ * ftrace.
+ * "mov x9, lr" is the official alias of "orr x9, xzr, lr".
+ */
+#define QUICK_LR_SAVE aarch64_insn_gen_logical_shifted_reg( \
+ AARCH64_INSN_REG_9, AARCH64_INSN_REG_ZR, \
+ AARCH64_INSN_REG_LR, 0, AARCH64_INSN_VARIANT_64BIT, \
+ AARCH64_INSN_LOGIC_ORR)
+
/*
* Turn on the call to ftrace_caller() in instrumented function
*/
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
- unsigned long pc = rec->ip;
+ unsigned long pc = rec->ip + REC_IP_BRANCH_OFFSET;
+ int ret;
u32 old, new;
long offset = (long)pc - (long)addr;

if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
- struct plt_entry trampoline;
struct module *mod;

/*
@@ -96,54 +144,65 @@ int ftrace_make_call(struct dyn_ftrace *
if (WARN_ON(!mod))
return -EINVAL;

- /*
- * There is only one ftrace trampoline per module. For now,
- * this is not a problem since on arm64, all dynamic ftrace
- * invocations are routed via ftrace_caller(). This will need
- * to be revisited if support for multiple ftrace entry points
- * is added in the future, but for now, the pr_err() below
- * deals with a theoretical issue only.
- */
- trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
- if (!plt_entries_equal(mod->arch.ftrace_trampoline,
- &trampoline)) {
- if (!plt_entries_equal(mod->arch.ftrace_trampoline,
- &(struct plt_entry){})) {
- pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
- return -EINVAL;
- }
-
- /* point the trampoline to our ftrace entry point */
- module_disable_ro(mod);
- *mod->arch.ftrace_trampoline = trampoline;
- module_enable_ro(mod, true);
-
- /* update trampoline before patching in the branch */
- smp_wmb();
+ /* Check against our well-known list of ftrace entry points */
+ if (addr == FTRACE_ADDR || addr == FTRACE_REGS_ADDR) {
+ ret = install_ftrace_trampoline(mod, &addr);
+ if (ret < 0)
+ return ret;
- }
- addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
+ } else
+ return -EINVAL;
+
#else /* CONFIG_ARM64_MODULE_PLTS */
return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
}

old = aarch64_insn_gen_nop();
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS)) {
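+ /*
+ * First patch the preceding NOP into the LR saver
+ * ("mov x9, lr"), so the branch patched in below finds
+ * the LR preserved in x9.
+ */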
+ new = QUICK_LR_SAVE;
+ ret = ftrace_modify_code(pc - AARCH64_INSN_SIZE,
+ old, new, true);
+ if (ret)
+ return ret;
+ }
new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);

return ftrace_modify_code(pc, old, new, true);
}

+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
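+/*
+ * Redirect an already-patched call site to a different tracer entry
+ * point, e.g. when switching between ftrace_caller and
+ * ftrace_regs_caller.
+ */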
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ unsigned long pc = rec->ip + REC_IP_BRANCH_OFFSET;
+ u32 old, new;
+
+ old = aarch64_insn_gen_branch_imm(pc, old_addr,
+ AARCH64_INSN_BRANCH_LINK);
+ new = aarch64_insn_gen_branch_imm(pc, addr, AARCH64_INSN_BRANCH_LINK);
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+#endif
+
/*
* Turn off the call to ftrace_caller() in instrumented function
*/
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
- unsigned long pc = rec->ip;
+ unsigned long pc = rec->ip + REC_IP_BRANCH_OFFSET;
bool validate = true;
+ int ret;
u32 old = 0, new;
long offset = (long)pc - (long)addr;

+ /*
+ * -fpatchable-function-entry= does not generate a profiling call
+ * initially; the NOPs are already there.
+ */
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && addr == MCOUNT_ADDR)
+ return 0;
+
if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
u32 replaced;
@@ -188,7 +247,15 @@ int ftrace_make_nop(struct module *mod,

new = aarch64_insn_gen_nop();

- return ftrace_modify_code(pc, old, new, validate);
+ ret = ftrace_modify_code(pc, old, new, validate);
+ if (ret)
+ return ret;
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS)) {
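+ /* also revert the LR saver ("mov x9, lr") back into a NOP */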
+ old = QUICK_LR_SAVE;
+ ret = ftrace_modify_code(pc - AARCH64_INSN_SIZE,
+ old, new, true);
+ }
+ return ret;
}

void arch_ftrace_update_code(int command)
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -113,6 +113,7 @@
#define MCOUNT_REC() . = ALIGN(8); \
__start_mcount_loc = .; \
KEEP(*(__mcount_loc)) \
+ KEEP(*(__patchable_function_entries)) \
__stop_mcount_loc = .;
#else
#define MCOUNT_REC()
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -223,8 +223,12 @@ struct ftrace_likely_data {
#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0, 0)))
#else
+#if defined(CONFIG_ARM64) && defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS)
+#define notrace __attribute__((patchable_function_entry(0)))
+#else
#define notrace __attribute__((no_instrument_function))
#endif
+#endif

/*
* it doesn't make sense on ARM (currently the only user of __naked)
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -32,7 +32,8 @@ struct mod_arch_specific {
struct mod_plt_sec init;

/* for CONFIG_DYNAMIC_FTRACE */
- struct plt_entry *ftrace_trampoline;
+ struct plt_entry *ftrace_trampolines;
+#define MOD_ARCH_NR_FTRACE_TRAMPOLINES 2
};
#endif

--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -451,7 +451,7 @@ int module_finalize(const Elf_Ehdr *hdr,
#ifdef CONFIG_ARM64_MODULE_PLTS
if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
!strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
- me->arch.ftrace_trampoline = (void *)s->sh_addr;
+ me->arch.ftrace_trampolines = (void *)s->sh_addr;
#endif
}

--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -323,7 +323,8 @@ int module_frob_arch_sections(Elf_Ehdr *
tramp->sh_type = SHT_NOBITS;
tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
tramp->sh_addralign = __alignof__(struct plt_entry);
- tramp->sh_size = sizeof(struct plt_entry);
+ tramp->sh_size = MOD_ARCH_NR_FTRACE_TRAMPOLINES
+ * sizeof(struct plt_entry);
}

return 0;
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -159,6 +159,7 @@
/*
* Register aliases.
*/
+fp .req x29 // frame pointer
lr .req x30 // link register

/*