Re: [PATCH V2 bpf-next 2/2] selftests/bpf: add selftest for bpf_task_get_cgroup

From: Jose Fernandez
Date: Mon Mar 18 2024 - 20:31:36 EST


On 24/03/18 10:58AM, Jiri Olsa wrote:
> On Sat, Mar 16, 2024 at 10:22:41AM -0600, Jose Fernandez wrote:
>
> SNIP
>
> > +void test_task_get_cgroup(void)
> > +{
> > +        struct test_task_get_cgroup *skel;
> > +        int err, fd;
> > +        pid_t pid;
> > +        __u64 cgroup_id, expected_cgroup_id;
> > +        const struct timespec req = {
> > +                .tv_sec = 1,
> > +                .tv_nsec = 0,
> > +        };
> > +
> > +        fd = test__join_cgroup(TEST_CGROUP);
> > +        if (!ASSERT_OK(fd < 0, "test_join_cgroup_TEST_CGROUP"))
> > +                return;
> > +
> > +        skel = test_task_get_cgroup__open();
> > +        if (!ASSERT_OK_PTR(skel, "test_task_get_cgroup__open"))
> > +                goto cleanup;
> > +
> > +        err = test_task_get_cgroup__load(skel);
> > +        if (!ASSERT_OK(err, "test_task_get_cgroup__load"))
> > +                goto cleanup;
>
> nit, you could call test_task_get_cgroup__open_and_load

Good point, I'll switch to the combined test_task_get_cgroup__open_and_load call.
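
Something like this (untested, but it should be a drop-in replacement for
the separate open and load steps above):

        skel = test_task_get_cgroup__open_and_load();
        if (!ASSERT_OK_PTR(skel, "test_task_get_cgroup__open_and_load"))
                goto cleanup;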

> > +
> > +        err = test_task_get_cgroup__attach(skel);
> > +        if (!ASSERT_OK(err, "test_task_get_cgroup__attach"))
> > +                goto cleanup;
> > +
> > +        pid = getpid();
> > +        expected_cgroup_id = get_cgroup_id(TEST_CGROUP);
> > +        if (!ASSERT_GT(expected_cgroup_id, 0, "get_cgroup_id"))
> > +                goto cleanup;
> > +
> > +        /* Trigger nanosleep to enter the sched_switch tracepoint */
> > +        /* The previous task should be this process */
> > +        syscall(__NR_nanosleep, &req, NULL);
>
> would a smaller sleep do? also we have our own usleep (in test_progs.c)
> that calls nanosleep

Yes, a smaller sleep should be fine.
I'll reduce the sleep duration and use the usleep() helper from test_progs.c.
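
Concretely, that drops the local timespec and the raw syscall in favor of
something like this (the exact duration is a placeholder; anything long
enough to force a context switch should do):

        /* trigger a sched_switch with this process as prev */
        usleep(100);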

> > +
> > +        err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.pid_to_cgid_map), &pid,
> > +                                  &cgroup_id);
> > +
> > +        if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
> > +                goto cleanup;
> > +
> > +        ASSERT_EQ(cgroup_id, expected_cgroup_id, "cgroup_id");
> > +
> > +cleanup:
> > +        test_task_get_cgroup__destroy(skel);
> > +        close(fd);
> > +}
> > diff --git a/tools/testing/selftests/bpf/progs/test_task_get_cgroup.c b/tools/testing/selftests/bpf/progs/test_task_get_cgroup.c
> > new file mode 100644
> > index 000000000000..580f8f0657d5
> > --- /dev/null
> > +++ b/tools/testing/selftests/bpf/progs/test_task_get_cgroup.c
> > @@ -0,0 +1,37 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +// Copyright 2024 Netflix, Inc.
> > +
> > +#include "vmlinux.h"
> > +#include <bpf/bpf_helpers.h>
> > +#include <bpf/bpf_tracing.h>
> > +
> > +struct cgroup *bpf_task_get_cgroup(struct task_struct *task) __ksym;
> > +void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
> > +
> > +struct {
> > +        __uint(type, BPF_MAP_TYPE_HASH);
> > +        __uint(max_entries, 4096);
> > +        __type(key, __u32);
> > +        __type(value, __u64);
> > +} pid_to_cgid_map SEC(".maps");
> > +
> > +SEC("tp_btf/sched_switch")
> > +int BPF_PROG(sched_switch, bool preempt, struct task_struct *prev,
> > +             struct task_struct *next)
> > +{
> > +        struct cgroup *cgrp;
> > +        u64 cgroup_id;
> > +        u32 pid;
> > +
>
> could you filter for your pid in here like we do in other places
> (e.g. in progs/kprobe_multi.c)?
>
> in which case you won't need the hash map, just a single value
> to store the cgroup id in
>
> jirka

I'll apply this suggestion as well and include it in V3.
Thanks for the feedback.
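
For V3 the BPF side would then look roughly like this, following the
progs/kprobe_multi.c pattern (untested sketch; my_pid and cgroup_id are
placeholder names, and the includes and kfunc declarations stay as in the
patch above):

pid_t my_pid = 0;
__u64 cgroup_id = 0;

SEC("tp_btf/sched_switch")
int BPF_PROG(sched_switch, bool preempt, struct task_struct *prev,
             struct task_struct *next)
{
        struct cgroup *cgrp;

        /* only record the cgroup of the test process */
        if (prev->pid != my_pid)
                return 0;

        cgrp = bpf_task_get_cgroup(prev);
        if (cgrp == NULL)
                return 0;

        cgroup_id = cgrp->kn->id;
        bpf_cgroup_release(cgrp);
        return 0;
}

The userspace side would set skel->bss->my_pid = getpid() before attaching
and read skel->bss->cgroup_id after the usleep(), instead of doing the map
lookup.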

>
> > +        cgrp = bpf_task_get_cgroup(prev);
> > +        if (cgrp == NULL)
> > +                return 0;
> > +        cgroup_id = cgrp->kn->id;
> > +        pid = prev->pid;
> > +        bpf_map_update_elem(&pid_to_cgid_map, &pid, &cgroup_id, BPF_ANY);
> > +
> > +        bpf_cgroup_release(cgrp);
> > +        return 0;
> > +}
> > +
> > +char _license[] SEC("license") = "GPL";
> > --
> > 2.40.1
> >