Re: [PATCH 2/2] bpf/selftests: Test skipping stacktrace

From: Namhyung Kim
Date: Thu Mar 10 2022 - 19:40:47 EST


On Thu, Mar 10, 2022 at 3:22 PM Yonghong Song <yhs@xxxxxx> wrote:
>
>
>
> On 3/10/22 12:22 AM, Namhyung Kim wrote:
> > Add a test case for stacktrace with skip > 0 using a small-sized
> > buffer. Previously the kernel didn't support skipping a number of
> > entries greater than or equal to the buffer size and filled the
> > skipped part with zeros.
> >
> > Signed-off-by: Namhyung Kim <namhyung@xxxxxxxxxx>
> > ---
> > .../bpf/prog_tests/stacktrace_map_skip.c | 72 ++++++++++++++++
> > .../selftests/bpf/progs/stacktrace_map_skip.c | 82 +++++++++++++++++++
> > 2 files changed, 154 insertions(+)
> > create mode 100644 tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
> > create mode 100644 tools/testing/selftests/bpf/progs/stacktrace_map_skip.c
> >
> > diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
> > new file mode 100644
> > index 000000000000..bcb244aa3c78
> > --- /dev/null
> > +++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c
> > @@ -0,0 +1,72 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +#include <test_progs.h>
> > +#include "stacktrace_map_skip.skel.h"
> > +
> > +#define TEST_STACK_DEPTH 2
> > +
> > +void test_stacktrace_map_skip(void)
> > +{
> > +	struct stacktrace_map_skip *skel;
> > +	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
> > +	int err, stack_trace_len;
> > +	__u32 key, val, duration = 0;
> > +
> > +	skel = stacktrace_map_skip__open_and_load();
> > +	if (CHECK(!skel, "skel_open_and_load", "skeleton open failed\n"))
> > +		return;
>
> Please use ASSERT_* macros instead of CHECK* macros.
> You can see other prog_tests/*.c files for examples.

I'll take a look and make the changes.
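
Something like this is what I have in mind (untested sketch using the
ASSERT_OK_PTR/ASSERT_GE/ASSERT_OK/ASSERT_EQ helpers from test_progs.h;
only some of the checks shown):

	skel = stacktrace_map_skip__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		return;

	control_map_fd = bpf_map__fd(skel->maps.control_map);
	if (!ASSERT_GE(control_map_fd, 0, "control_map_fd"))
		goto out;

	/* the other map fd checks follow the same pattern */

	err = stacktrace_map_skip__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (!ASSERT_OK(err, "compare_map_keys stackid_hmap vs. stackmap"))
		goto out;

	ASSERT_EQ(skel->bss->failed, 0, "check_skip");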

>
> > +
> > +	/* find map fds */
> > +	control_map_fd = bpf_map__fd(skel->maps.control_map);
> > +	if (CHECK_FAIL(control_map_fd < 0))
> > +		goto out;
> > +
> > +	stackid_hmap_fd = bpf_map__fd(skel->maps.stackid_hmap);
> > +	if (CHECK_FAIL(stackid_hmap_fd < 0))
> > +		goto out;
> > +
> > +	stackmap_fd = bpf_map__fd(skel->maps.stackmap);
> > +	if (CHECK_FAIL(stackmap_fd < 0))
> > +		goto out;
> > +
> > +	stack_amap_fd = bpf_map__fd(skel->maps.stack_amap);
> > +	if (CHECK_FAIL(stack_amap_fd < 0))
> > +		goto out;
> > +
> > +	err = stacktrace_map_skip__attach(skel);
> > +	if (CHECK(err, "skel_attach", "skeleton attach failed\n"))
> > +		goto out;
> > +
> > +	/* give some time for bpf program run */
> > +	sleep(1);
> > +
> > +	/* disable stack trace collection */
> > +	key = 0;
> > +	val = 1;
> > +	bpf_map_update_elem(control_map_fd, &key, &val, 0);
> > +
> > +	/* for every element in stackid_hmap, we can find a corresponding one
> > +	 * in stackmap, and vice versa.
> > +	 */
> > +	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
> > +	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
> > +		  "err %d errno %d\n", err, errno))
> > +		goto out;
> > +
> > +	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
> > +	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
> > +		  "err %d errno %d\n", err, errno))
> > +		goto out;
> > +
> > +	stack_trace_len = TEST_STACK_DEPTH * sizeof(__u64);
> > +	err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
> > +	if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
> > +		  "err %d errno %d\n", err, errno))
> > +		goto out;
> > +
> > +	if (CHECK(skel->bss->failed, "check skip",
> > +		  "failed to skip some depth: %d", skel->bss->failed))
> > +		goto out;
> > +
> > +out:
> > +	stacktrace_map_skip__destroy(skel);
> > +}
> > diff --git a/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c b/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c
> > new file mode 100644
> > index 000000000000..323248b17ae4
> > --- /dev/null
> > +++ b/tools/testing/selftests/bpf/progs/stacktrace_map_skip.c
> > @@ -0,0 +1,82 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +#include <vmlinux.h>
> > +#include <bpf/bpf_helpers.h>
> > +
> > +#define TEST_STACK_DEPTH 2
> > +
> > +struct {
> > +	__uint(type, BPF_MAP_TYPE_ARRAY);
> > +	__uint(max_entries, 1);
> > +	__type(key, __u32);
> > +	__type(value, __u32);
> > +} control_map SEC(".maps");
>
> You can use a global variable for this.
> The global variable can be assigned a value (if needed, e.g., non-zero)
> before skeleton open and load.

Right, will change.
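
For example (untested sketch; "control" here is just an illustrative name
for the global):

  In the BPF program:

	int control = 0;

	...
	if (control)
		return 0;	/* skip if collection is disabled */

  In the test, open and load can be split so an initial value could be
  assigned if ever needed, and the flag is flipped through the skeleton's
  bss:

	skel = stacktrace_map_skip__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* an initial value could be set here, before load */
	err = stacktrace_map_skip__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto out;

	...

	/* disable stack trace collection */
	skel->bss->control = 1;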

>
> > +
> > +struct {
> > +	__uint(type, BPF_MAP_TYPE_HASH);
> > +	__uint(max_entries, 16384);
> > +	__type(key, __u32);
> > +	__type(value, __u32);
> > +} stackid_hmap SEC(".maps");
> > +
> > +typedef __u64 stack_trace_t[TEST_STACK_DEPTH];
> > +
> > +struct {
> > +	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
> > +	__uint(max_entries, 16384);
> > +	__type(key, __u32);
> > +	__type(value, stack_trace_t);
> > +} stackmap SEC(".maps");
> > +
> > +struct {
> > +	__uint(type, BPF_MAP_TYPE_ARRAY);
> > +	__uint(max_entries, 16384);
> > +	__type(key, __u32);
> > +	__type(value, stack_trace_t);
> > +} stack_amap SEC(".maps");
> > +
> > +/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
> > +struct sched_switch_args {
> > +	unsigned long long pad;
> > +	char prev_comm[TASK_COMM_LEN];
> > +	int prev_pid;
> > +	int prev_prio;
> > +	long long prev_state;
> > +	char next_comm[TASK_COMM_LEN];
> > +	int next_pid;
> > +	int next_prio;
> > +};
>
> You can use this structure in vmlinux.h instead of the above:
> struct trace_event_raw_sched_switch {
> 	struct trace_entry ent;
> 	char prev_comm[16];
> 	pid_t prev_pid;
> 	int prev_prio;
> 	long int prev_state;
> 	char next_comm[16];
> 	pid_t next_pid;
> 	int next_prio;
> 	char __data[0];
> };

Looks good, will change.
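
Since the handler doesn't read any of the tracepoint fields, it should
just be a matter of dropping the local definition and switching the
context type, roughly:

	SEC("tracepoint/sched/sched_switch")
	int oncpu(struct trace_event_raw_sched_switch *ctx)
	{
		/* body unchanged, ctx is only passed to the stack helpers */
		...
	}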

>
> > +
> > +int failed = 0;
> > +
> > +SEC("tracepoint/sched/sched_switch")
> > +int oncpu(struct sched_switch_args *ctx)
> > +{
> > +	__u32 max_len = TEST_STACK_DEPTH * sizeof(__u64);
> > +	__u32 key = 0, val = 0, *value_p;
> > +	__u64 *stack_p;
> > +
> > +	value_p = bpf_map_lookup_elem(&control_map, &key);
> > +	if (value_p && *value_p)
> > +		return 0; /* skip if non-zero *value_p */
> > +
> > +	/* it should allow skipping whole buffer size entries */
> > +	key = bpf_get_stackid(ctx, &stackmap, TEST_STACK_DEPTH);
> > +	if ((int)key >= 0) {
> > +		/* The size of stackmap and stack_amap should be the same */
> > +		bpf_map_update_elem(&stackid_hmap, &key, &val, 0);
> > +		stack_p = bpf_map_lookup_elem(&stack_amap, &key);
> > +		if (stack_p) {
> > +			bpf_get_stack(ctx, stack_p, max_len, TEST_STACK_DEPTH);
> > +			/* it wrongly skipped all the entries and filled zero */
> > +			if (stack_p[0] == 0)
> > +				failed = 1;
> > +		}
> > +	} else if ((int)key == -14/*EFAULT*/) {
> > +		/* old kernel doesn't support skipping that many entries */
> > +		failed = 2;
>
> The selftest is supposed to run with the kernel in the same code base.
> So it is okay to skip the above 'if' test and just set failed = 2.

I see. I will make the change.
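
So the tail of oncpu() would become something like this (sketch):

	key = bpf_get_stackid(ctx, &stackmap, TEST_STACK_DEPTH);
	if ((int)key >= 0) {
		/* ... same checks as before ... */
	} else {
		/* the test runs against the matching kernel, so any error
		 * here means the skip was not handled
		 */
		failed = 2;
	}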

Thanks,
Namhyung

>
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +char _license[] SEC("license") = "GPL";