Re: [PATCH v7 05/10] perf record --off-cpu: Dump off-cpu samples in BPF
From: Ian Rogers
Date: Mon Nov 11 2024 - 13:32:25 EST
On Mon, Nov 11, 2024 at 10:05 AM Howard Chu <howardchu95@xxxxxxxxx> wrote:
>
> Hi Ian,
>
> On Mon, Nov 11, 2024 at 9:54 AM Ian Rogers <irogers@xxxxxxxxxx> wrote:
> >
> > On Fri, Nov 8, 2024 at 12:41 PM Howard Chu <howardchu95@xxxxxxxxx> wrote:
> > >
> > > Collect tid, period, callchain, and cgroup id, and dump them when the
> > > off-cpu time threshold is reached.
> > >
> > > We don't collect the off-cpu time (the delta) twice: it is either in
> > > the direct samples or in the accumulated samples that are dumped at the
> > > end of perf.data.
> > >
> > > Suggested-by: Namhyung Kim <namhyung@xxxxxxxxxx>
> > > Signed-off-by: Howard Chu <howardchu95@xxxxxxxxx>
> > > ---
> > > tools/perf/util/bpf_skel/off_cpu.bpf.c | 83 ++++++++++++++++++++++++--
> > > 1 file changed, 78 insertions(+), 5 deletions(-)
> > >
> > > diff --git a/tools/perf/util/bpf_skel/off_cpu.bpf.c b/tools/perf/util/bpf_skel/off_cpu.bpf.c
> > > index dc6acafb9353..bf652c30b1c9 100644
> > > --- a/tools/perf/util/bpf_skel/off_cpu.bpf.c
> > > +++ b/tools/perf/util/bpf_skel/off_cpu.bpf.c
> > > @@ -18,10 +18,18 @@
> > > #define MAX_STACKS 32
> > > #define MAX_ENTRIES 102400
> > >
> > > +#define MAX_CPUS 4096
> > > +#define MAX_OFFCPU_LEN 37
> > > +
> > > +struct stack {
> > > + u64 array[MAX_STACKS];
> > > +};
> > > +
> > > struct tstamp_data {
> > > __u32 stack_id;
> > > __u32 state;
> > > __u64 timestamp;
> > > + struct stack stack;
> > > };
> > >
> > > struct offcpu_key {
> > > @@ -39,6 +47,24 @@ struct {
> > > __uint(max_entries, MAX_ENTRIES);
> > > } stacks SEC(".maps");
> > >
> > > +struct offcpu_data {
> > > + u64 array[MAX_OFFCPU_LEN];
> > > +};
> > > +
> > > +struct {
> > > + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
> > > + __uint(key_size, sizeof(__u32));
> > > + __uint(value_size, sizeof(__u32));
> > > + __uint(max_entries, MAX_CPUS);
> > > +} offcpu_output SEC(".maps");
> >
> > Does patch 4 build without this definition? (we're in patch 5 here). I
> > think this should be in patch 4.
>
> Okay sure thanks :)
>
> >
> > > +
> > > +struct {
> > > + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
> > > + __uint(key_size, sizeof(__u32));
> > > + __uint(value_size, sizeof(struct offcpu_data));
> > > + __uint(max_entries, 1);
> > > +} offcpu_payload SEC(".maps");
> > > +
> > > struct {
> > > __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
> > > __uint(map_flags, BPF_F_NO_PREALLOC);
> > > @@ -185,6 +211,39 @@ static inline int can_record(struct task_struct *t, int state)
> > > return 1;
> > > }
> > >
> > > +static inline int copy_stack(struct stack *from, struct offcpu_data *to, int n)
> > > +{
> > > + int len = 0;
> > > +
> > > + for (int i = 0; i < MAX_STACKS && from->array[i]; ++i, ++len)
> > > + to->array[n + 2 + i] = from->array[i];
> > > +
> > > + return len;
> > > +}
> > > +
> >
> > Dump is something of a generic name. Could you kernel-doc this
> > function to describe the behavior?
>
> Sure.
>
> >
> > > +static int off_cpu_dump(void *ctx, struct offcpu_data *data, struct offcpu_key *key,
> > > + struct stack *stack, __u64 delta, __u64 timestamp)
> > > +{
> > > + /* dump tid, period, callchain, and cgroup id */
> > > + int n = 0, len = 0;
> > > +
> > > + data->array[n++] = (u64)key->tgid << 32 | key->pid;
> > > + data->array[n++] = delta;
> > > +
> > > + /* data->array[n] is callchain->nr (updated later) */
> > > + data->array[n + 1] = PERF_CONTEXT_USER;
> > > + data->array[n + 2] = 0;
> > > + len = copy_stack(stack, data, n);
> > > +
> > > + /* update length of callchain */
> > > + data->array[n] = len + 1;
> > > + n += len + 2;
> > > +
> > > + data->array[n++] = key->cgroup_id;
> > > +
> > > + return bpf_perf_event_output(ctx, &offcpu_output, BPF_F_CURRENT_CPU, data, n * sizeof(u64));
> > > +}
> > > +
> > > static int off_cpu_stat(u64 *ctx, struct task_struct *prev,
> > > struct task_struct *next, int state)
> > > {
> > > @@ -209,6 +268,12 @@ static int off_cpu_stat(u64 *ctx, struct task_struct *prev,
> > > pelem->state = state;
> > > pelem->stack_id = stack_id;
> > >
> > > + /*
> > > + * If stacks are successfully collected by bpf_get_stackid(), collect them once more
> > > + * in task_storage for direct off-cpu sample dumping
> > > + */
> > > + if (stack_id > 0 && bpf_get_stack(ctx, &pelem->stack, MAX_STACKS * sizeof(u64), BPF_F_USER_STACK)) {
> > > + }
> >
> > Why the empty if?
>
> I forgot to say in the commit message: bpf_get_stack() has a return
> value, and if I don't use it I get an unused-result warning from clang,
> so it's either this or:
> int __attribute__((unused)) len;
> len = bpf_get_stack(ctx, stack_p->array, MAX_STACKS * sizeof(u64),
>
> We don't need error handling; execution falls through to "next:"
> naturally, there's no code in between.
Perhaps capture that as a comment in the `if` and/or add a continue,
just to show that the code doesn't care about errors.
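Untested, and reusing the names already in the patch, but something along
these lines would make that explicit without changing behavior:

	/*
	 * The user stack is best-effort: the sample is dumped with or
	 * without it, so bpf_get_stack()'s return value is only checked
	 * to silence clang's unused-result warning.
	 */
	if (stack_id > 0 &&
	    bpf_get_stack(ctx, &pelem->stack, MAX_STACKS * sizeof(u64),
			  BPF_F_USER_STACK)) {
		/* errors are ignored, execution falls through to "next:" */
	}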
Thanks,
Ian
> >
> > > +
> > > next:
> > > pelem = bpf_task_storage_get(&tstamp, next, NULL, 0);
> > >
> > > @@ -223,11 +288,19 @@ static int off_cpu_stat(u64 *ctx, struct task_struct *prev,
> > > __u64 delta = ts - pelem->timestamp;
> > > __u64 *total;
> > >
> > > - total = bpf_map_lookup_elem(&off_cpu, &key);
> > > - if (total)
> > > - *total += delta;
> > > - else
> > > - bpf_map_update_elem(&off_cpu, &key, &delta, BPF_ANY);
> > > + if (delta >= offcpu_thresh) {
> > > + int zero = 0;
> > > + struct offcpu_data *data = bpf_map_lookup_elem(&offcpu_payload, &zero);
> > > +
> > > + if (data)
> > > + off_cpu_dump(ctx, data, &key, &pelem->stack, delta, pelem->timestamp);
> > > + } else {
> > > + total = bpf_map_lookup_elem(&off_cpu, &key);
> > > + if (total)
> > > + *total += delta;
> > > + else
> > > + bpf_map_update_elem(&off_cpu, &key, &delta, BPF_ANY);
> > > + }
> >
> > Looks good! :-)
> >
> > Thanks,
> > Ian
> >
> > >
> > > /* prevent to reuse the timestamp later */
> > > pelem->timestamp = 0;
> > > --
> > > 2.43.0
> > >
>
> Thanks,
> Howard