Re: [PATCH v10 perf, bpf-next 3/9] perf, bpf: introduce PERF_RECORD_BPF_EVENT

From: Peter Zijlstra
Date: Thu Jan 17 2019 - 08:10:00 EST


On Wed, Jan 16, 2019 at 08:29:25AM -0800, Song Liu wrote:
> + /*
> + * Record bpf events:
> + * enum perf_bpf_event_type {
> + * PERF_BPF_EVENT_UNKNOWN = 0,
> + * PERF_BPF_EVENT_PROG_LOAD = 1,
> + * PERF_BPF_EVENT_PROG_UNLOAD = 2,
> + * };
> + *
> + * struct {
> + * struct perf_event_header header;
> + * u16 type;
> + * u16 flags;
> + * u32 id;
> + * u8 tag[BPF_TAG_SIZE];

This permanently fixes BPF_TAG_SIZE into the ABI; is that intentional? We
could easily make that a variable-length field like with the other event.
Or is that value already part of the eBPF ABI?

> + * struct sample_id sample_id;
> + * };
> + */
> + PERF_RECORD_BPF_EVENT = 18,
> @@ -7744,6 +7747,121 @@ void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, bool unregister,
> WARN_ONCE(1, "%s: Invalid KSYMBOL type 0x%x\n", __func__, ksym_type);
> }
>
> +struct perf_bpf_event {
> + struct bpf_prog *prog;
> + struct {
> + struct perf_event_header header;
> + u16 type;
> + u16 flags;
> + u32 id;
> + u8 tag[BPF_TAG_SIZE];
> + } event_id;
> +};

> +static void perf_event_bpf_emit_ksymbols(struct bpf_prog *prog,
> + enum perf_bpf_event_type type)
> +{
> + bool unregister = type == PERF_BPF_EVENT_PROG_UNLOAD;
> + int i;
> +
> + if (prog->aux->func_cnt == 0) {
> + perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF,
> + (u64)(unsigned long)prog->bpf_func,
> + prog->jited_len, unregister,
> + perf_event_bpf_get_name, prog);
> + } else {
> + for (i = 0; i < prog->aux->func_cnt; i++) {
> + struct bpf_prog *subprog = prog->aux->func[i];
> +
> + perf_event_ksymbol(
> + PERF_RECORD_KSYMBOL_TYPE_BPF,
> + (u64)(unsigned long)subprog->bpf_func,
> + subprog->jited_len, unregister,
> + perf_event_bpf_get_name, subprog);
> + }
> + }
> +}

I still think this is a weird place to do this.. :-) See the patches I
just sent.

> +void perf_event_bpf_event(struct bpf_prog *prog,
> + enum perf_bpf_event_type type,
> + u16 flags)
> +{
> + struct perf_bpf_event bpf_event;
> +
> + if (type <= PERF_BPF_EVENT_UNKNOWN ||
> + type >= PERF_BPF_EVENT_MAX)
> + return;
> +
> + switch (type) {
> + case PERF_BPF_EVENT_PROG_LOAD:
> + case PERF_BPF_EVENT_PROG_UNLOAD:
> + if (atomic_read(&nr_ksymbol_events))
> + perf_event_bpf_emit_ksymbols(prog, type);
> + break;
> + default:
> + break;
> + }
> +
> + if (!atomic_read(&nr_bpf_events))
> + return;
> +
> + bpf_event = (struct perf_bpf_event){
> + .prog = prog,
> + .event_id = {
> + .header = {
> + .type = PERF_RECORD_BPF_EVENT,
> + .size = sizeof(bpf_event.event_id),
> + },
> + .type = type,
> + .flags = flags,
> + .id = prog->aux->id,
> + },
> + };

BUILD_BUG_ON(BPF_TAG_SIZE % sizeof(u64));

> + memcpy(bpf_event.event_id.tag, prog->tag, BPF_TAG_SIZE);
> + perf_iterate_sb(perf_event_bpf_output, &bpf_event, NULL);
> +}

Anyway, small nits only:

Acked-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxxx>