Re: [RFC PATCH 1/4] trace: Simplify kprobe overridable function check
From: Masami Hiramatsu (Google)
Date: Thu Apr 02 2026 - 09:27:18 EST
On Thu, 2 Apr 2026 17:26:04 +0800
Yafang Shao <laoar.shao@xxxxxxxxx> wrote:
> Simplify the logic for checking overridable kprobe functions by removing
> redundant code.
>
> No functional change.
NACK.
struct trace_kprobe must stay hidden inside trace_kprobe.c. It is not
designed to be exposed outside that file.
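To illustrate what I mean, here is a minimal standalone sketch of the
opaque-type pattern (the names below are placeholders for illustration,
not the actual tracing API): only a forward declaration and accessor
functions go in the header, while the struct layout stays private to the
.c file, just as struct trace_kprobe should stay private to
trace_kprobe.c.

/* widget.h: callers only ever see an opaque handle plus accessors. */
struct widget;                          /* layout intentionally hidden */
struct widget *widget_create(int id);
int widget_id(const struct widget *w);
void widget_destroy(struct widget *w);

/* widget.c: the definition is visible only in this translation unit. */
#include <stdlib.h>

struct widget {
	int id;                         /* private detail, free to change */
};

struct widget *widget_create(int id)
{
	struct widget *w = malloc(sizeof(*w));

	if (w)
		w->id = id;
	return w;
}

int widget_id(const struct widget *w)
{
	return w->id;
}

void widget_destroy(struct widget *w)
{
	free(w);
}

With this layering, other files can use the type only through the
helpers, so the internals can change without touching any caller. Moving
struct trace_kprobe into trace_probe.h gives that property up.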
Thank you,
>
> Signed-off-by: Yafang Shao <laoar.shao@xxxxxxxxx>
> ---
> kernel/trace/bpf_trace.c | 13 ++++++---
> kernel/trace/trace_kprobe.c | 40 +++++----------------------
> kernel/trace/trace_probe.h | 54 ++++++++++++++++++++++++++-----------
> 3 files changed, 54 insertions(+), 53 deletions(-)
>
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index 0b040a417442..c901ace836cb 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -1929,10 +1929,15 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
> * Kprobe override only works if they are on the function entry,
> * and only if they are on the opt-in list.
> */
> - if (prog->kprobe_override &&
> - (!trace_kprobe_on_func_entry(event->tp_event) ||
> - !trace_kprobe_error_injectable(event->tp_event)))
> - return -EINVAL;
> + if (prog->kprobe_override) {
> + struct trace_kprobe *tp = trace_kprobe_primary_from_call(event->tp_event);
> +
> + if (!tp)
> + return -EINVAL;
> + if (!trace_kprobe_on_func_entry(tp) ||
> + !trace_kprobe_error_injectable(tp))
> + return -EINVAL;
> + }
>
> mutex_lock(&bpf_event_mutex);
>
> diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
> index a5dbb72528e0..768702674a5c 100644
> --- a/kernel/trace/trace_kprobe.c
> +++ b/kernel/trace/trace_kprobe.c
> @@ -53,17 +53,6 @@ static struct dyn_event_operations trace_kprobe_ops = {
> .match = trace_kprobe_match,
> };
>
> -/*
> - * Kprobe event core functions
> - */
> -struct trace_kprobe {
> - struct dyn_event devent;
> - struct kretprobe rp; /* Use rp.kp for kprobe use */
> - unsigned long __percpu *nhit;
> - const char *symbol; /* symbol name */
> - struct trace_probe tp;
> -};
> -
> static bool is_trace_kprobe(struct dyn_event *ev)
> {
> return ev->ops == &trace_kprobe_ops;
> @@ -212,33 +201,16 @@ unsigned long trace_kprobe_address(struct trace_kprobe *tk)
> return addr;
> }
>
> -static nokprobe_inline struct trace_kprobe *
> -trace_kprobe_primary_from_call(struct trace_event_call *call)
> -{
> - struct trace_probe *tp;
> -
> - tp = trace_probe_primary_from_call(call);
> - if (WARN_ON_ONCE(!tp))
> - return NULL;
> -
> - return container_of(tp, struct trace_kprobe, tp);
> -}
> -
> -bool trace_kprobe_on_func_entry(struct trace_event_call *call)
> +bool trace_kprobe_on_func_entry(struct trace_kprobe *tp)
> {
> - struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
> -
> - return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
> - tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
> - tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
> + return !kprobe_on_func_entry(tp->rp.kp.addr,
> + tp->rp.kp.addr ? NULL : tp->rp.kp.symbol_name,
> + tp->rp.kp.addr ? 0 : tp->rp.kp.offset);
> }
>
> -bool trace_kprobe_error_injectable(struct trace_event_call *call)
> +bool trace_kprobe_error_injectable(struct trace_kprobe *tp)
> {
> - struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
> -
> - return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
> - false;
> + return within_error_injection_list(trace_kprobe_address(tp));
> }
>
> static int register_kprobe_event(struct trace_kprobe *tk);
> diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
> index 9fc56c937130..958eb78a9068 100644
> --- a/kernel/trace/trace_probe.h
> +++ b/kernel/trace/trace_probe.h
> @@ -30,6 +30,7 @@
>
> #include "trace.h"
> #include "trace_output.h"
> +#include "trace_dynevent.h"
>
> #define MAX_TRACE_ARGS 128
> #define MAX_ARGSTR_LEN 63
> @@ -210,21 +211,6 @@ DECLARE_BASIC_PRINT_TYPE_FUNC(symbol);
> #define ASSIGN_FETCH_TYPE_END {}
> #define MAX_ARRAY_LEN 64
>
> -#ifdef CONFIG_KPROBE_EVENTS
> -bool trace_kprobe_on_func_entry(struct trace_event_call *call);
> -bool trace_kprobe_error_injectable(struct trace_event_call *call);
> -#else
> -static inline bool trace_kprobe_on_func_entry(struct trace_event_call *call)
> -{
> - return false;
> -}
> -
> -static inline bool trace_kprobe_error_injectable(struct trace_event_call *call)
> -{
> - return false;
> -}
> -#endif /* CONFIG_KPROBE_EVENTS */
> -
> struct probe_arg {
> struct fetch_insn *code;
> bool dynamic;/* Dynamic array (string) is used */
> @@ -271,6 +257,32 @@ struct event_file_link {
> struct list_head list;
> };
>
> +/*
> + * Kprobe event core functions
> + */
> +struct trace_kprobe {
> + struct dyn_event devent;
> + struct kretprobe rp; /* Use rp.kp for kprobe use */
> + unsigned long __percpu *nhit;
> + const char *symbol; /* symbol name */
> + struct trace_probe tp;
> +};
> +
> +#ifdef CONFIG_KPROBE_EVENTS
> +bool trace_kprobe_on_func_entry(struct trace_kprobe *tp);
> +bool trace_kprobe_error_injectable(struct trace_kprobe *tp);
> +#else
> +static inline bool trace_kprobe_on_func_entry(struct trace_kprobe *tp)
> +{
> + return false;
> +}
> +
> +static inline bool trace_kprobe_error_injectable(struct trace_kprobe *tp)
> +{
> + return false;
> +}
> +#endif /* CONFIG_KPROBE_EVENTS */
> +
> static inline unsigned int trace_probe_load_flag(struct trace_probe *tp)
> {
> return smp_load_acquire(&tp->event->flags);
> @@ -329,6 +341,18 @@ trace_probe_primary_from_call(struct trace_event_call *call)
> return list_first_entry_or_null(&tpe->probes, struct trace_probe, list);
> }
>
> +static nokprobe_inline struct trace_kprobe *
> +trace_kprobe_primary_from_call(struct trace_event_call *call)
> +{
> + struct trace_probe *tp;
> +
> + tp = trace_probe_primary_from_call(call);
> + if (WARN_ON_ONCE(!tp))
> + return NULL;
> +
> + return container_of(tp, struct trace_kprobe, tp);
> +}
> +
> static inline struct list_head *trace_probe_probe_list(struct trace_probe *tp)
> {
> return &tp->event->probes;
> --
> 2.47.3
>
--
Masami Hiramatsu (Google) <mhiramat@xxxxxxxxxx>