Re: [PATCH v7 4/8] arm64: ftrace: Enable HAVE_FUNCTION_GRAPH_RETVAL
From: Mark Rutland
Date: Fri Mar 24 2023 - 08:58:23 EST
On Fri, Mar 24, 2023 at 05:37:27AM -0700, Donglin Peng wrote:
> The commit d4815c5d1bbd ("function_graph: Support recording and
> printing the return value of function") laid the groundwork for the
> funcgraph-retval, and this modification makes it available
> on the ARM64 platform.
>
> We introduce a new structure called fgraph_ret_regs for the ARM64
> platform to hold return registers and the frame pointer. We then
> fill its content in the return_to_handler and pass its address to
> the function ftrace_return_to_handler to record the return value.
I'm happy with this, or with using ftrace_regs and capturing more regs here.
This overall looks good, but there's one functional issue and a couple of minor
nits which I've detailed below.
>
> Signed-off-by: Donglin Peng <pengdonglin@xxxxxxxxxxxxxx>
> ---
> arch/arm64/Kconfig | 1 +
> arch/arm64/include/asm/ftrace.h | 23 +++++++++++++++++++++++
> arch/arm64/kernel/entry-ftrace.S | 9 +++++----
> 3 files changed, 29 insertions(+), 4 deletions(-)
>
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 1023e896d46b..48856d230800 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -195,6 +195,7 @@ config ARM64
> select HAVE_FTRACE_MCOUNT_RECORD
> select HAVE_FUNCTION_TRACER
> select HAVE_FUNCTION_ERROR_INJECTION
> + select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER
> select HAVE_FUNCTION_GRAPH_TRACER
> select HAVE_GCC_PLUGINS
> select HAVE_HW_BREAKPOINT if PERF_EVENTS
> diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
> index 1c2672bbbf37..f68dcc41be3b 100644
> --- a/arch/arm64/include/asm/ftrace.h
> +++ b/arch/arm64/include/asm/ftrace.h
> @@ -170,4 +170,27 @@ static inline bool arch_syscall_match_sym_name(const char *sym,
> }
> #endif /* ifndef __ASSEMBLY__ */
>
> +#ifndef __ASSEMBLY__
> +
> +#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
> +struct fgraph_ret_regs {
> + /* x0 - x7 */
> + u64 regs[8];
> +
> + u64 fp;
> +};
As a minor nit, for ftrace_regs we used `unsigned long` rather than `u64`;
could we do the same here for consistency?
This will need to be padded to 16 bytes, as within the kernel, arm64 requires
the SP to be aligned to 16 bytes at all times. Please can you add an `__unused`
field, like we have in ftrace_regs, to ensure that?
> +
> +static inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
> +{
> + return ret_regs->regs[0];
> +}
> +
> +static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
> +{
> + return ret_regs->fp;
> +}
> +#endif
> +
> +#endif
> +
> #endif /* __ASM_FTRACE_H */
> diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
> index 350ed81324ac..8ac6f952e68f 100644
> --- a/arch/arm64/kernel/entry-ftrace.S
> +++ b/arch/arm64/kernel/entry-ftrace.S
> @@ -270,14 +270,15 @@ SYM_FUNC_END(ftrace_stub_graph)
> */
> SYM_CODE_START(return_to_handler)
> /* save return value regs */
> - sub sp, sp, #64
> + sub sp, sp, #72
> stp x0, x1, [sp]
> stp x2, x3, [sp, #16]
> stp x4, x5, [sp, #32]
> stp x6, x7, [sp, #48]
> + str x29, [sp, #64] // parent's fp
As above, this will need to be padded to keep the stack aligned to 16 bytes,
and I'd prefer if we could use asm-offsets so that we can have something like:
sub sp, sp, #FRET_REGS_SIZE
stp x0, x1, [sp, #FRET_REGS_X0]
stp x2, x3, [sp, #FRET_REGS_X2]
stp x4, x5, [sp, #FRET_REGS_X4]
stp x6, x7, [sp, #FRET_REGS_X6]
str x29, [sp, #FRET_REGS_FP]
>
> - mov x0, x29 // parent's fp
> - bl ftrace_return_to_handler// addr = ftrace_return_to_hander(fp);
> + mov x0, sp
> + bl ftrace_return_to_handler // addr = ftrace_return_to_handler(regs);
> mov x30, x0 // restore the original return address
>
> /* restore return value regs */
> @@ -285,7 +286,7 @@ SYM_CODE_START(return_to_handler)
> ldp x2, x3, [sp, #16]
> ldp x4, x5, [sp, #32]
> ldp x6, x7, [sp, #48]
> - add sp, sp, #64
> + add sp, sp, #72
Likewise here.
Other than that, this looks good to me, thanks for respinning!
Thanks,
Mark.