Re: [PATCH bpf-next] bpf: allow bpf_current_task_under_cgroup() with BPF_CGROUP_*

From: Andrii Nakryiko
Date: Mon Jul 22 2024 - 17:08:48 EST


On Mon, Jul 22, 2024 at 11:21 AM <technoboy85@xxxxxxxxx> wrote:
>
> From: Matteo Croce <teknoraver@xxxxxxxx>
>
> The helper bpf_current_task_under_cgroup() is currently only allowed
> for tracing programs.
> Also allow its usage in the BPF_CGROUP_* program types.
> Move the code from kernel/trace/bpf_trace.c to kernel/bpf/cgroup.c,
> so it also compiles without CONFIG_BPF_EVENTS.
>
> Signed-off-by: Matteo Croce <teknoraver@xxxxxxxx>
> ---
>  include/linux/bpf.h      |  1 +
>  kernel/bpf/cgroup.c      | 25 +++++++++++++++++++++++++
>  kernel/trace/bpf_trace.c | 27 ++-------------------------
> 3 files changed, 28 insertions(+), 25 deletions(-)
>

It seems fine to allow this, but note that we also have the
bpf_task_under_cgroup() kfunc; you might want to check whether that one
is already allowed where you need it as well (rough sketch below).
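
For reference, calling that kfunc from BPF code looks roughly like this
(untested sketch from memory; it assumes the usual vmlinux.h/libbpf
headers and declares the kfunc prototypes by hand):

    /* kfunc prototypes, declared manually with __ksym */
    struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
    void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
    long bpf_task_under_cgroup(struct task_struct *task,
                               struct cgroup *ancestor) __ksym;

    static bool current_under_cgroup(u64 cgrp_id)
    {
            struct task_struct *task = bpf_get_current_task_btf();
            struct cgroup *cgrp = bpf_cgroup_from_id(cgrp_id);
            bool ret;

            if (!cgrp)
                    return false;
            ret = bpf_task_under_cgroup(task, cgrp);
            bpf_cgroup_release(cgrp);
            return ret;
    }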

And that kfunc is defined in kernel/bpf/helpers.c, so I'd move this
helper next to it to keep them close.
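
For the helper side, usage from a cgroup program would then look roughly
like this (also an untested sketch; the map, program and section names
are only illustrative): userspace populates a BPF_MAP_TYPE_CGROUP_ARRAY
map with a cgroup fd, the program passes that map plus an index and, per
the code below, gets 1 if current is under the cgroup, 0 if not, or a
negative error:

    struct {
            __uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
            __uint(max_entries, 1);
            __uint(key_size, sizeof(__u32));
            __uint(value_size, sizeof(__u32));
    } cgrp_map SEC(".maps");

    SEC("cgroup/sock_create")
    int deny_outside_cgroup(struct bpf_sock *ctx)
    {
            /* 1: under the cgroup at index 0, 0: not, <0: error */
            if (bpf_current_task_under_cgroup(&cgrp_map, 0) != 1)
                    return 0;       /* reject socket creation */
            return 1;               /* allow */
    }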

pw-bot: cr


> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index 4f1d4a97b9d1..4000fd161dda 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -3188,6 +3188,7 @@ extern const struct bpf_func_proto bpf_sock_hash_update_proto;
> extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
> extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
> extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto;
> +extern const struct bpf_func_proto bpf_current_task_under_cgroup_proto;
> extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
> extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
> extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
> diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
> index 8ba73042a239..b99add9570e6 100644
> --- a/kernel/bpf/cgroup.c
> +++ b/kernel/bpf/cgroup.c
> @@ -2308,6 +2308,29 @@ static const struct bpf_func_proto bpf_get_netns_cookie_sockopt_proto = {
> };
> #endif
>
> +BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
> +{
> +	struct bpf_array *array = container_of(map, struct bpf_array, map);
> +	struct cgroup *cgrp;
> +
> +	if (unlikely(idx >= array->map.max_entries))
> +		return -E2BIG;
> +
> +	cgrp = READ_ONCE(array->ptrs[idx]);
> +	if (unlikely(!cgrp))
> +		return -EAGAIN;
> +
> +	return task_under_cgroup_hierarchy(current, cgrp);
> +}
> +
> +const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
> +	.func		= bpf_current_task_under_cgroup,
> +	.gpl_only	= false,
> +	.ret_type	= RET_INTEGER,
> +	.arg1_type	= ARG_CONST_MAP_PTR,
> +	.arg2_type	= ARG_ANYTHING,
> +};
> +
> static const struct bpf_func_proto *
> cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
> {
> @@ -2581,6 +2604,8 @@ cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
> 	case BPF_FUNC_get_cgroup_classid:
> 		return &bpf_get_cgroup_classid_curr_proto;
> #endif
> +	case BPF_FUNC_current_task_under_cgroup:
> +		return &bpf_current_task_under_cgroup_proto;
> 	default:
> 		return NULL;
> 	}
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index cd098846e251..ea5cdd122024 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -798,29 +798,6 @@ const struct bpf_func_proto bpf_task_pt_regs_proto = {
> 	.ret_btf_id	= &bpf_task_pt_regs_ids[0],
> };
>
> -BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
> -{
> -	struct bpf_array *array = container_of(map, struct bpf_array, map);
> -	struct cgroup *cgrp;
> -
> -	if (unlikely(idx >= array->map.max_entries))
> -		return -E2BIG;
> -
> -	cgrp = READ_ONCE(array->ptrs[idx]);
> -	if (unlikely(!cgrp))
> -		return -EAGAIN;
> -
> -	return task_under_cgroup_hierarchy(current, cgrp);
> -}
> -
> -static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
> -	.func		= bpf_current_task_under_cgroup,
> -	.gpl_only	= false,
> -	.ret_type	= RET_INTEGER,
> -	.arg1_type	= ARG_CONST_MAP_PTR,
> -	.arg2_type	= ARG_ANYTHING,
> -};
> -
> struct send_signal_irq_work {
> 	struct irq_work irq_work;
> 	struct task_struct *task;
> @@ -1548,8 +1525,6 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
> 		return &bpf_get_numa_node_id_proto;
> 	case BPF_FUNC_perf_event_read:
> 		return &bpf_perf_event_read_proto;
> -	case BPF_FUNC_current_task_under_cgroup:
> -		return &bpf_current_task_under_cgroup_proto;
> 	case BPF_FUNC_get_prandom_u32:
> 		return &bpf_get_prandom_u32_proto;
> 	case BPF_FUNC_probe_write_user:
> @@ -1578,6 +1553,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
> 		return &bpf_cgrp_storage_get_proto;
> 	case BPF_FUNC_cgrp_storage_delete:
> 		return &bpf_cgrp_storage_delete_proto;
> +	case BPF_FUNC_current_task_under_cgroup:
> +		return &bpf_current_task_under_cgroup_proto;
> #endif
> 	case BPF_FUNC_send_signal:
> 		return &bpf_send_signal_proto;
> --
> 2.45.2
>
>