Re: [PATCH v9 29/36] bpf: Enable kprobe_multi feature if CONFIG_FPROBE is enabled

From: Andrii Nakryiko
Date: Thu Apr 25 2024 - 16:09:55 EST


On Mon, Apr 15, 2024 at 6:22 AM Masami Hiramatsu (Google)
<mhiramat@xxxxxxxxxx> wrote:
>
> From: Masami Hiramatsu (Google) <mhiramat@xxxxxxxxxx>
>
> Enable the kprobe_multi feature if CONFIG_FPROBE is enabled. The pt_regs is
> converted from ftrace_regs by ftrace_partial_regs(), thus some registers
> may always return 0. But it should be enough for function entry (accessing
> arguments) and exit (accessing the return value).
>
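Just to spell out what "partial" means here: ftrace_partial_regs() can only
fill in what fprobe saved into ftrace_regs -- roughly the argument registers,
stack pointer and pc. A purely illustrative sketch of the non-CAST conversion
(field names are placeholders, not any real arch's layout):

static __always_inline struct pt_regs *
ftrace_partial_regs(struct ftrace_regs *fregs, struct pt_regs *regs)
{
        /* copy only what was saved on function entry/exit: args, sp, pc */
        memcpy(regs->regs, fregs->regs, sizeof(fregs->regs));
        regs->sp = fregs->sp;
        regs->pc = fregs->pc;
        /*
         * Everything else in the scratch pt_regs is left untouched
         * (0 for a zero-initialized per-CPU buffer), hence "some
         * registers may always return 0" above.
         */
        return regs;
}

That is enough to read arguments at entry and the return value at exit, which
is all kprobe_multi needs.
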
> Signed-off-by: Masami Hiramatsu (Google) <mhiramat@xxxxxxxxxx>
> Acked-by: Florent Revest <revest@xxxxxxxxxxxx>
> ---
> Changes from previous series: NOTHING, Update against the new series.
> ---
> kernel/trace/bpf_trace.c | 22 +++++++++-------------
> 1 file changed, 9 insertions(+), 13 deletions(-)
>
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index e51a6ef87167..57b1174030c9 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -2577,7 +2577,7 @@ static int __init bpf_event_init(void)
> fs_initcall(bpf_event_init);
> #endif /* CONFIG_MODULES */
>
> -#if defined(CONFIG_FPROBE) && defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS)
> +#ifdef CONFIG_FPROBE
> struct bpf_kprobe_multi_link {
>         struct bpf_link link;
>         struct fprobe fp;
> @@ -2600,6 +2600,8 @@ struct user_syms {
>         char *buf;
> };
>
> +static DEFINE_PER_CPU(struct pt_regs, bpf_kprobe_multi_pt_regs);

This per-CPU pt_regs buffer is wasted memory if
CONFIG_HAVE_PT_REGS_TO_FTRACE_REGS_CAST=y, right? Can we guard it with
that config?
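
Something along these lines, maybe (completely untested sketch, and
bpf_kprobe_multi_pt_regs_ptr() is just a name made up for illustration):

#if !defined(CONFIG_HAVE_PT_REGS_TO_FTRACE_REGS_CAST)
static DEFINE_PER_CPU(struct pt_regs, bpf_kprobe_multi_pt_regs);
#define bpf_kprobe_multi_pt_regs_ptr() this_cpu_ptr(&bpf_kprobe_multi_pt_regs)
#else
/* ftrace_regs can be cast to pt_regs, so no scratch buffer is needed */
#define bpf_kprobe_multi_pt_regs_ptr() NULL
#endif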


> +
> static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 cnt)
> {
>         unsigned long __user usymbol;
> @@ -2792,13 +2794,14 @@ static u64 bpf_kprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
>
> static int
> kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
> -                           unsigned long entry_ip, struct pt_regs *regs)
> +                           unsigned long entry_ip, struct ftrace_regs *fregs)
> {
>         struct bpf_kprobe_multi_run_ctx run_ctx = {
>                 .link = link,
>                 .entry_ip = entry_ip,
>         };
>         struct bpf_run_ctx *old_run_ctx;
> +       struct pt_regs *regs;
>         int err;
>
>         if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
> @@ -2809,6 +2812,7 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
>
>         migrate_disable();
>         rcu_read_lock();
> +       regs = ftrace_partial_regs(fregs, this_cpu_ptr(&bpf_kprobe_multi_pt_regs));

and then pass NULL if defined(CONFIG_HAVE_PT_REGS_TO_FTRACE_REGS_CAST)?
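
i.e. something like (reusing the made-up helper from the sketch above):

        regs = ftrace_partial_regs(fregs, bpf_kprobe_multi_pt_regs_ptr());

In the CAST case ftrace_partial_regs() can just return the pt_regs embedded
in fregs, so the NULL scratch pointer should never be dereferenced -- worth
double-checking, though.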


>         old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
>         err = bpf_prog_run(link->link.prog, regs);
>         bpf_reset_run_ctx(old_run_ctx);
> @@ -2826,13 +2830,9 @@ kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
>                           void *data)
> {
>         struct bpf_kprobe_multi_link *link;
> -       struct pt_regs *regs = ftrace_get_regs(fregs);
> -
> -       if (!regs)
> -               return 0;
>
>         link = container_of(fp, struct bpf_kprobe_multi_link, fp);
> -       kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
> +       kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), fregs);
>         return 0;
> }
>
> @@ -2842,13 +2842,9 @@ kprobe_multi_link_exit_handler(struct fprobe *fp, unsigned long fentry_ip,
>                                void *data)
> {
>         struct bpf_kprobe_multi_link *link;
> -       struct pt_regs *regs = ftrace_get_regs(fregs);
> -
> -       if (!regs)
> -               return;
>
>         link = container_of(fp, struct bpf_kprobe_multi_link, fp);
> -       kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
> +       kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), fregs);
> }
>
> static int symbols_cmp_r(const void *a, const void *b, const void *priv)
> @@ -3107,7 +3103,7 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
>         kvfree(cookies);
>         return err;
> }
> -#else /* !CONFIG_FPROBE || !CONFIG_DYNAMIC_FTRACE_WITH_REGS */
> +#else /* !CONFIG_FPROBE */
> int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
> {
>         return -EOPNOTSUPP;
>
>