Re: [PATCH v19 18/19] ftrace: Add ftrace_get_symaddr to convert fentry_ip to symaddr

From: Masami Hiramatsu (Google) <mhiramat@xxxxxxxxxx>
Date: Mon Dec 09 2024 - 03:05:21 EST


On Mon, 11 Nov 2024 00:52:21 +0900
"Masami Hiramatsu (Google)" <mhiramat@xxxxxxxxxx> wrote:

> From: Masami Hiramatsu (Google) <mhiramat@xxxxxxxxxx>
>
> This introduces ftrace_get_symaddr(), which tries to convert the
> fentry_ip passed to an ftrace or fgraph callback into a symbol address
> without calling the kallsyms API. It returns the symbol address, or 0
> if the conversion fails.
>
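
For context, the expected use is from an ftrace/fgraph callback that
receives an fentry_ip. A hypothetical caller, assuming the standard
ftrace_func_t callback signature (the callback name here is made up for
illustration):

	static void my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct ftrace_regs *fregs)
	{
		/* @ip is the fentry_ip that ftrace passes in. */
		unsigned long symaddr = ftrace_get_symaddr(ip);

		if (symaddr)	/* 0 means no fast path on this arch */
			pr_debug("hit %ps\n", (void *)symaddr);
	}
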
> Signed-off-by: Masami Hiramatsu (Google) <mhiramat@xxxxxxxxxx>
> ---
> Changes in v19:
> - Newly added.
> ---
> arch/arm64/include/asm/ftrace.h |  2 +
> arch/arm64/kernel/ftrace.c      | 63 +++++++++++++++++++++++++++++++++++++++
> arch/x86/include/asm/ftrace.h   | 21 +++++++++++++
> include/linux/ftrace.h          | 13 ++++++++
> 4 files changed, 99 insertions(+)
>
> diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
> index 876e88ad4119..f08e70bf09ea 100644
> --- a/arch/arm64/include/asm/ftrace.h
> +++ b/arch/arm64/include/asm/ftrace.h
> @@ -52,6 +52,8 @@ extern unsigned long ftrace_graph_call;
> extern void return_to_handler(void);
>
> unsigned long ftrace_call_adjust(unsigned long addr);
> +unsigned long arch_ftrace_call_adjust(unsigned long fentry_ip);
> +#define ftrace_call_adjust(fentry_ip) arch_ftrace_call_adjust(fentry_ip)

Oops, these should be arch_ftrace_get_symaddr() and
ftrace_get_symaddr(); the hunk accidentally reuses the
ftrace_call_adjust name. It needs to be fixed in the next version.
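
For the record, the intended declarations are presumably:

	unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip);
	#define ftrace_get_symaddr(fentry_ip) arch_ftrace_get_symaddr(fentry_ip)

matching the arch_ftrace_get_symaddr() definition added to
arch/arm64/kernel/ftrace.c below and the x86 version of the #define.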

Thanks,

>
> #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
> #define HAVE_ARCH_FTRACE_REGS
> diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
> index 606fd6994578..de1223669758 100644
> --- a/arch/arm64/kernel/ftrace.c
> +++ b/arch/arm64/kernel/ftrace.c
> @@ -143,6 +143,69 @@ unsigned long ftrace_call_adjust(unsigned long addr)
> return addr;
> }
>
> +/* Convert fentry_ip to the symbol address without kallsyms */
> +unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip)
> +{
> + u32 insn;
> +
> + /*
> + * When using patchable-function-entry without pre-function NOPs, ftrace
> + * entry is the address of the first NOP after the function entry point.
> + *
> + * The compiler has either generated:
> + *
> + * func+00: func: NOP // To be patched to MOV X9, LR
> + * func+04: NOP // To be patched to BL <caller>
> + *
> + * Or:
> + *
> + * func-04: BTI C
> + * func+00: func: NOP // To be patched to MOV X9, LR
> + * func+04: NOP // To be patched to BL <caller>
> + *
> + * The fentry_ip is the address of `BL <caller>` which is at `func + 4`
> + * bytes in either case.
> + */
> + if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS))
> + return fentry_ip - AARCH64_INSN_SIZE;
> +
> + /*
> + * When using patchable-function-entry with pre-function NOPs, the BTI
> + * placement is a bit different.
> + *
> + * func+00: func: NOP // To be patched to MOV X9, LR
> + * func+04: NOP // To be patched to BL <caller>
> + *
> + * Or:
> + *
> + * func+00: func: BTI C
> + * func+04: NOP // To be patched to MOV X9, LR
> + * func+08: NOP // To be patched to BL <caller>
> + *
> + * The fentry_ip is the address of `BL <caller>` which is at either
> + * `func + 4` or `func + 8`, depending on whether there is a BTI.
> + */
> +
> + /* If there is no BTI, the func address should be one instruction before. */
> + if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
> + return fentry_ip - AARCH64_INSN_SIZE;
> +
> + /*
> + * We want to be extra safe in case entry ip is on the page edge,
> + * but otherwise we need to avoid get_kernel_nofault()'s overhead.
> + */
> + if ((fentry_ip & ~PAGE_MASK) < AARCH64_INSN_SIZE * 2) {
> + if (get_kernel_nofault(insn, (u32 *)(fentry_ip - AARCH64_INSN_SIZE * 2)))
> + return 0;
> + } else {
> + insn = *(u32 *)(fentry_ip - AARCH64_INSN_SIZE * 2);
> + }
> +
> + if (aarch64_insn_is_bti(le32_to_cpu((__le32)insn)))
> + return fentry_ip - AARCH64_INSN_SIZE * 2;
> +
> + return fentry_ip - AARCH64_INSN_SIZE;
> +}
> +
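
This duplicates the page-edge-safe read that the x86 version below
does; if a third user shows up, it could be factored into a shared
helper, roughly like this (just a sketch, the helper name is made up):

	/*
	 * Read the u32 at @ip - @offset, where @ip itself is known to be
	 * mapped. Take the get_kernel_nofault() slow path only when the
	 * load may land on the previous (possibly unmapped) page.
	 * Returns false if the read faults.
	 */
	static bool ftrace_read_insn_before(unsigned long ip,
					    unsigned long offset, u32 *insn)
	{
		if ((ip & ~PAGE_MASK) < offset)
			return !get_kernel_nofault(*insn, (u32 *)(ip - offset));

		*insn = *(u32 *)(ip - offset);
		return true;
	}
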
> /*
> * Replace a single instruction, which may be a branch or NOP.
> * If @validate == true, a replaced instruction is checked against 'old'.
> diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
> index cc92c99ef276..f9cb4d07df58 100644
> --- a/arch/x86/include/asm/ftrace.h
> +++ b/arch/x86/include/asm/ftrace.h
> @@ -34,6 +34,27 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
> return addr;
> }
>
> +static inline unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip)
> +{
> +#ifdef CONFIG_X86_KERNEL_IBT
> + u32 instr;
> +
> + /*
> + * We want to be extra safe in case entry ip is on the page edge,
> + * but otherwise we need to avoid get_kernel_nofault()'s overhead.
> + */
> + if ((fentry_ip & ~PAGE_MASK) < ENDBR_INSN_SIZE) {
> + if (get_kernel_nofault(instr, (u32 *)(fentry_ip - ENDBR_INSN_SIZE)))
> + return fentry_ip;
> + } else {
> + instr = *(u32 *)(fentry_ip - ENDBR_INSN_SIZE);
> + }
> + if (is_endbr(instr))
> + fentry_ip -= ENDBR_INSN_SIZE;
> +#endif
> + return fentry_ip;
> +}
> +#define ftrace_get_symaddr(fentry_ip) arch_ftrace_get_symaddr(fentry_ip)
> +
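
To spell out the layout this is undoing (IBT case; a sketch, not the
exact opcodes):

	/*
	 * sym+0: endbr64            <- symbol address
	 * sym+4: call __fentry__    <- fentry_ip points here
	 *
	 * so if is_endbr() matches at fentry_ip - ENDBR_INSN_SIZE, the
	 * symbol address is fentry_ip - ENDBR_INSN_SIZE; otherwise (and
	 * with CONFIG_X86_KERNEL_IBT=n) fentry_ip is already the symbol
	 * address.
	 */
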
> #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
>
> #include <linux/ftrace_regs.h>
> diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> index 4c553fe9c026..9659bb2cd76c 100644
> --- a/include/linux/ftrace.h
> +++ b/include/linux/ftrace.h
> @@ -652,6 +652,19 @@ struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
>
> bool is_ftrace_trampoline(unsigned long addr);
>
> +/* Arches can override ftrace_get_symaddr() to convert fentry_ip to symaddr. */
> +#ifndef ftrace_get_symaddr
> +/**
> + * ftrace_get_symaddr - return the symbol address from fentry_ip
> + * @fentry_ip: the address of ftrace location
> + *
> + * Get the symbol address from @fentry_ip (fast path). If there is no
> + * fast search path, this returns 0, and the user may need to use the
> + * kallsyms API to find the symbol address.
> + */
> +#define ftrace_get_symaddr(fentry_ip) (0)
> +#endif
> +
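
For the slow path mentioned in the kerneldoc, a caller might do
something like this (a sketch; kallsyms_lookup_size_offset() reports
the offset of fentry_ip within its symbol):

	unsigned long symaddr, offset;

	symaddr = ftrace_get_symaddr(fentry_ip);
	if (!symaddr && kallsyms_lookup_size_offset(fentry_ip, NULL, &offset))
		symaddr = fentry_ip - offset;
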
> /*
> * The dyn_ftrace record's flags field is split into two parts.
> * the first part which is '0-FTRACE_REF_MAX' is a counter of


--
Masami Hiramatsu (Google) <mhiramat@xxxxxxxxxx>