[PATCH v22 01/20] fgraph: Get ftrace recursion lock in function_graph_enter

From: Masami Hiramatsu (Google)
Date: Thu Dec 26 2024 - 00:12:17 EST


From: Masami Hiramatsu (Google) <mhiramat@xxxxxxxxxx>

Take the ftrace recursion lock in the generic function_graph_enter()
instead of in each architecture's code.
As a result, all function_graph tracer callbacks now run in a
non-preemptive context. On x86 and powerpc this was already the case
by default, but on the other architectures this behavior is new.
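
A simplified sketch of the resulting entry path (the ret_stack push and
the graph_ops entry-handler loop are elided; see the fgraph.c hunk below
for the actual change):

  int function_graph_enter(unsigned long ret, unsigned long func,
                           unsigned long frame_pointer, unsigned long *retp)
  {
          int bit;

          /* Reject recursive/nested entries before touching any state. */
          bit = ftrace_test_recursion_trylock(func, ret);
          if (bit < 0)
                  return -EBUSY;

          /* ... push the return frame and run the entry handlers ... */

          ftrace_test_recursion_unlock(bit);
          return 0;
  }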

Signed-off-by: Masami Hiramatsu (Google) <mhiramat@xxxxxxxxxx>
Cc: Steven Rostedt <rostedt@xxxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Cc: Nicholas Piggin <npiggin@xxxxxxxxx>
Cc: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
Cc: Naveen N Rao <naveen@xxxxxxxxxx>
Cc: Madhavan Srinivasan <maddy@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: x86@xxxxxxxxxx
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Mathieu Desnoyers <mathieu.desnoyers@xxxxxxxxxxxx>
---
Changes in v21:
- Newly added.
---
arch/powerpc/kernel/trace/ftrace.c | 6 ------
arch/powerpc/kernel/trace/ftrace_64_pg.c | 6 ------
arch/x86/kernel/ftrace.c | 7 -------
kernel/trace/fgraph.c | 8 +++++++-
4 files changed, 7 insertions(+), 20 deletions(-)

diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 5ccd791761e8..e41daf2c4a31 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -658,7 +658,6 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs)
{
unsigned long sp = arch_ftrace_regs(fregs)->regs.gpr[1];
- int bit;

if (unlikely(ftrace_graph_is_dead()))
goto out;
@@ -666,14 +665,9 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;

- bit = ftrace_test_recursion_trylock(ip, parent_ip);
- if (bit < 0)
- goto out;
-
if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp))
parent_ip = ppc_function_entry(return_to_handler);

- ftrace_test_recursion_unlock(bit);
out:
arch_ftrace_regs(fregs)->regs.link = parent_ip;
}
diff --git a/arch/powerpc/kernel/trace/ftrace_64_pg.c b/arch/powerpc/kernel/trace/ftrace_64_pg.c
index 98787376eb87..8fb860b90ae1 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_pg.c
+++ b/arch/powerpc/kernel/trace/ftrace_64_pg.c
@@ -790,7 +790,6 @@ static unsigned long
__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
{
unsigned long return_hooker;
- int bit;

if (unlikely(ftrace_graph_is_dead()))
goto out;
@@ -798,16 +797,11 @@ __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp
if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;

- bit = ftrace_test_recursion_trylock(ip, parent);
- if (bit < 0)
- goto out;
-
return_hooker = ppc_function_entry(return_to_handler);

if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
parent = return_hooker;

- ftrace_test_recursion_unlock(bit);
out:
return parent;
}
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 4dd0ad6c94d6..33f50c80f481 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -615,7 +615,6 @@ void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
unsigned long frame_pointer)
{
unsigned long return_hooker = (unsigned long)&return_to_handler;
- int bit;

/*
* When resuming from suspend-to-ram, this function can be indirectly
@@ -635,14 +634,8 @@ void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;

- bit = ftrace_test_recursion_trylock(ip, *parent);
- if (bit < 0)
- return;
-
if (!function_graph_enter(*parent, ip, frame_pointer, parent))
*parent = return_hooker;
-
- ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index ddedcb50917f..5c68d6109119 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -650,8 +650,13 @@ int function_graph_enter(unsigned long ret, unsigned long func,
struct ftrace_graph_ent trace;
unsigned long bitmap = 0;
int offset;
+ int bit;
int i;

+ bit = ftrace_test_recursion_trylock(func, ret);
+ if (bit < 0)
+ return -EBUSY;
+
trace.func = func;
trace.depth = ++current->curr_ret_depth;

@@ -697,12 +702,13 @@ int function_graph_enter(unsigned long ret, unsigned long func,
* flag, set that bit always.
*/
set_bitmap(current, offset, bitmap | BIT(0));
-
+ ftrace_test_recursion_unlock(bit);
return 0;
out_ret:
current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1;
out:
current->curr_ret_depth--;
+ ftrace_test_recursion_unlock(bit);
return -EBUSY;
}