Re: Linux 6.1-rc3 build fail in include/linux/bpf.h

From: Jiri Olsa
Date: Mon Oct 31 2022 - 17:54:05 EST


On Mon, Oct 31, 2022 at 04:21:42PM +0100, Peter Zijlstra wrote:
> On Mon, Oct 31, 2022 at 01:17:16PM +0100, Jiri Olsa wrote:
> > On Mon, Oct 31, 2022 at 11:14:31AM +0000, David Laight wrote:
> > > The 6.1-rc3 sources fail to build because bpf.h unconditionally
> > > #define BPF_DISPATCHER_ATTRIBUTES __attribute__((patchable_function_entry(5)))
> > > for X86_64 builds.
> > >
> > > I'm pretty sure that should depend on some other options
> > > since the compiler isn't required to support it.
> > > (The gcc 7.5.0 on my Ubuntu 18.04 system certainly doesn't)
> > >
> > > The only other reference to that attribute is in the definition
> > > of 'notrace' in compiler.h.
> >
> > I guess we need to make some __has_attribute check and make all that conditional
> >
> > cc-ing Peter
>
> Does something crazy like the below work? It compiles but is otherwise
> totally untested.

looks good

it now has the ftrace nop and the jump to the dispatcher image
or the bpf_dispatcher_nop_func.. great :)

bpf_dispatcher_xdp_func:

ffffffff81cc87b0 <load1+0xcc87b0>:
ffffffff81cc87b0: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
ffffffff81cc87b5: e9 a6 fe 65 ff jmp 0xffffffff81328660

tests work for me.. Toke, Björn, could you please check?

thanks,
jirka


>
> ---
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index 9e7d46d16032..7d7a00306d19 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -953,6 +953,10 @@ struct bpf_dispatcher {
> void *rw_image;
> u32 image_off;
> struct bpf_ksym ksym;
> +#ifdef CONFIG_HAVE_STATIC_CALL
> + struct static_call_key *sc_key;
> + void *sc_tramp;
> +#endif
> };
>
> static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
> @@ -970,6 +974,20 @@ struct bpf_trampoline *bpf_trampoline_get(u64 key,
> struct bpf_attach_target_info *tgt_info);
> void bpf_trampoline_put(struct bpf_trampoline *tr);
> int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
> +
> +
> +#ifdef CONFIG_HAVE_STATIC_CALL
> +#define BPF_DISPATCH_CALL(name) static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)
> +
> +#define __BPF_DISPATCHER_SC_INIT(_name) \
> + .sc_key = &STATIC_CALL_KEY(_name), \
> + .sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),
> +
> +#else
> +#define BPF_DISPATCH_CALL(name) bpf_func(ctx, insnsi)
> +#define __BPF_DISPATCHER_SC_INIT(name)
> +#endif
> +
> #define BPF_DISPATCHER_INIT(_name) { \
> .mutex = __MUTEX_INITIALIZER(_name.mutex), \
> .func = &_name##_func, \
> @@ -981,32 +999,29 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func
> .name = #_name, \
> .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \
> }, \
> + __BPF_DISPATCHER_SC_INIT(_name##_call) \
> }
>
> -#ifdef CONFIG_X86_64
> -#define BPF_DISPATCHER_ATTRIBUTES __attribute__((patchable_function_entry(5)))
> -#else
> -#define BPF_DISPATCHER_ATTRIBUTES
> -#endif
> -
> #define DEFINE_BPF_DISPATCHER(name) \
> - notrace BPF_DISPATCHER_ATTRIBUTES \
> + DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func); \
> noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \
> const void *ctx, \
> const struct bpf_insn *insnsi, \
> bpf_func_t bpf_func) \
> { \
> - return bpf_func(ctx, insnsi); \
> + return BPF_DISPATCH_CALL(name); \
> } \
> EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \
> struct bpf_dispatcher bpf_dispatcher_##name = \
> BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
> +
> #define DECLARE_BPF_DISPATCHER(name) \
> unsigned int bpf_dispatcher_##name##_func( \
> const void *ctx, \
> const struct bpf_insn *insnsi, \
> bpf_func_t bpf_func); \
> extern struct bpf_dispatcher bpf_dispatcher_##name;
> +
> #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
> #define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
> void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
> diff --git a/kernel/bpf/dispatcher.c b/kernel/bpf/dispatcher.c
> index fa64b80b8bca..1ca8bd6da6bb 100644
> --- a/kernel/bpf/dispatcher.c
> +++ b/kernel/bpf/dispatcher.c
> @@ -4,6 +4,7 @@
> #include <linux/hash.h>
> #include <linux/bpf.h>
> #include <linux/filter.h>
> +#include <linux/static_call.h>
>
> /* The BPF dispatcher is a multiway branch code generator. The
> * dispatcher is a mechanism to avoid the performance penalty of an
> @@ -106,7 +107,6 @@ static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
> {
> void *old, *new, *tmp;
> u32 noff;
> - int err;
>
> if (!prev_num_progs) {
> old = NULL;
> @@ -128,11 +128,10 @@ static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
> return;
> }
>
> - err = bpf_arch_text_poke(d->func, BPF_MOD_JUMP, old, new);
> - if (err || !new)
> - return;
> + __static_call_update(d->sc_key, d->sc_tramp, new ?: &bpf_dispatcher_nop_func);
>
> - d->image_off = noff;
> + if (new)
> + d->image_off = noff;
> }
>
> void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,