[PATCH bpf-next 3/3] bpf/selftests: Verify struct_ops prog sleepable behavior

From: David Vernet
Date: Mon Jan 23 2023 - 18:22:49 EST


In a set of prior changes, we added the ability for struct_ops programs
to be sleepable. This patch enhances the dummy_st_ops selftest suite to
validate this behavior by adding a new struct_ops entry to dummy_st_ops
which calls a KF_SLEEPABLE kfunc.

Signed-off-by: David Vernet <void@xxxxxxxxxxxxx>
---
include/linux/bpf.h | 1 +
net/bpf/bpf_dummy_struct_ops.c | 18 ++++
net/bpf/test_run.c | 6 ++
.../selftests/bpf/prog_tests/dummy_st_ops.c | 85 +++++++++++++++++--
.../selftests/bpf/progs/dummy_st_ops.c | 11 +++
5 files changed, 113 insertions(+), 8 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index b30739634947..34b90bda3eed 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1473,6 +1473,7 @@ struct bpf_dummy_ops {
int (*test_1)(struct bpf_dummy_ops_state *cb);
int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
char a3, unsigned long a4);
+ int (*test_3)(struct bpf_dummy_ops_state *cb);
};

int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index 1ac4467928a9..46099737d1da 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -154,6 +154,23 @@ static bool bpf_dummy_ops_is_valid_access(int off, int size,
return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

+static int bpf_dummy_ops_check_member(const struct btf_type *t,
+ const struct btf_member *member,
+ const struct bpf_prog *prog)
+{
+ u32 moff = __btf_member_bit_offset(t, member) / 8;
+
+ switch (moff) {
+ case offsetof(struct bpf_dummy_ops, test_3):
+ break;
+ default:
+ if (prog->aux->sleepable)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size, enum bpf_access_type atype,
@@ -208,6 +225,7 @@ static void bpf_dummy_unreg(void *kdata)
struct bpf_struct_ops bpf_bpf_dummy_ops = {
.verifier_ops = &bpf_dummy_verifier_ops,
.init = bpf_dummy_init,
+ .check_member = bpf_dummy_ops_check_member,
.init_member = bpf_dummy_init_member,
.reg = bpf_dummy_reg,
.unreg = bpf_dummy_unreg,
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 2723623429ac..cce1be49a3b7 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -730,6 +730,10 @@ noinline void bpf_kfunc_call_test_destructive(void)
{
}

+noinline void bpf_kfunc_call_test_sleepable(void)
+{
+}
+
__diag_pop();

BTF_SET8_START(bpf_test_modify_return_ids)
@@ -767,6 +771,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
+BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
BTF_SET8_END(test_sk_check_kfunc_ids)

static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
@@ -1677,6 +1682,7 @@ static int __init bpf_prog_test_run_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_prog_test_kfunc_set);
return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
THIS_MODULE);
diff --git a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
index c11832657d2b..fd496ef8f905 100644
--- a/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
+++ b/tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
@@ -9,13 +9,37 @@ struct bpf_dummy_ops_state {
int val;
};

+static struct dummy_st_ops *open_load_skel(void)
+{
+ int err;
+ struct dummy_st_ops *skel;
+
+ skel = dummy_st_ops__open();
+ if (!ASSERT_OK_PTR(skel, "dummy_st_ops_open"))
+ return NULL;
+
+ err = bpf_program__set_flags(skel->progs.test_3, bpf_program__flags(skel->progs.test_3) | BPF_F_SLEEPABLE);
+ if (!ASSERT_OK(err, "set_sleepable")) {
+ dummy_st_ops__destroy(skel);
+ return NULL;
+ }
+
+ err = dummy_st_ops__load(skel);
+ if (!ASSERT_OK(err, "dummy_st_ops_load")) {
+ dummy_st_ops__destroy(skel);
+ return NULL;
+ }
+
+ return skel;
+}
+
static void test_dummy_st_ops_attach(void)
{
struct dummy_st_ops *skel;
struct bpf_link *link;

- skel = dummy_st_ops__open_and_load();
- if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
+ skel = open_load_skel();
+ if (!skel)
return;

link = bpf_map__attach_struct_ops(skel->maps.dummy_1);
@@ -34,8 +58,8 @@ static void test_dummy_init_ret_value(void)
struct dummy_st_ops *skel;
int fd, err;

- skel = dummy_st_ops__open_and_load();
- if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
+ skel = open_load_skel();
+ if (!skel)
return;

fd = bpf_program__fd(skel->progs.test_1);
@@ -61,8 +85,8 @@ static void test_dummy_init_ptr_arg(void)
struct dummy_st_ops *skel;
int fd, err;

- skel = dummy_st_ops__open_and_load();
- if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
+ skel = open_load_skel();
+ if (!skel)
return;

fd = bpf_program__fd(skel->progs.test_1);
@@ -107,8 +131,8 @@ static void test_dummy_multiple_args(void)
size_t i;
char name[8];

- skel = dummy_st_ops__open_and_load();
- if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
+ skel = open_load_skel();
+ if (!skel)
return;

fd = bpf_program__fd(skel->progs.test_2);
@@ -122,6 +146,47 @@ static void test_dummy_multiple_args(void)
dummy_st_ops__destroy(skel);
}

+static void test_dummy_sleepable(void)
+{
+ __u64 args[1] = {0};
+ LIBBPF_OPTS(bpf_test_run_opts, attr,
+ .ctx_in = args,
+ .ctx_size_in = sizeof(args),
+ );
+ struct dummy_st_ops *skel;
+ int fd, err;
+
+ skel = open_load_skel();
+ if (!skel)
+ return;
+
+ fd = bpf_program__fd(skel->progs.test_3);
+ err = bpf_prog_test_run_opts(fd, &attr);
+ ASSERT_OK(err, "test_run");
+
+ dummy_st_ops__destroy(skel);
+}
+
+static void test_dummy_sleepable_disallowed(void)
+{
+ struct dummy_st_ops *skel;
+ int err;
+
+ skel = dummy_st_ops__open();
+ if (!ASSERT_OK_PTR(skel, "dummy_st_ops_open"))
+ goto out;
+
+ err = bpf_program__set_flags(skel->progs.test_1, bpf_program__flags(skel->progs.test_1) | BPF_F_SLEEPABLE);
+ if (!ASSERT_OK(err, "set_sleepable"))
+ goto out;
+
+ err = dummy_st_ops__load(skel);
+ ASSERT_ERR(err, "dummy_st_ops_load");
+
+out:
+ dummy_st_ops__destroy(skel);
+}
+
void test_dummy_st_ops(void)
{
if (test__start_subtest("dummy_st_ops_attach"))
@@ -132,4 +197,8 @@ void test_dummy_st_ops(void)
test_dummy_init_ptr_arg();
if (test__start_subtest("dummy_multiple_args"))
test_dummy_multiple_args();
+ if (test__start_subtest("dummy_sleepable"))
+ test_dummy_sleepable();
+ if (test__start_subtest("dummy_sleepable_disallowed"))
+ test_dummy_sleepable_disallowed();
}
diff --git a/tools/testing/selftests/bpf/progs/dummy_st_ops.c b/tools/testing/selftests/bpf/progs/dummy_st_ops.c
index ead87edb75e2..721886867efb 100644
--- a/tools/testing/selftests/bpf/progs/dummy_st_ops.c
+++ b/tools/testing/selftests/bpf/progs/dummy_st_ops.c
@@ -12,10 +12,13 @@ struct bpf_dummy_ops {
int (*test_1)(struct bpf_dummy_ops_state *state);
int (*test_2)(struct bpf_dummy_ops_state *state, int a1, unsigned short a2,
char a3, unsigned long a4);
+ int (*test_3)(struct bpf_dummy_ops_state *state);
};

char _license[] SEC("license") = "GPL";

+void bpf_kfunc_call_test_sleepable(void) __ksym;
+
SEC("struct_ops/test_1")
int BPF_PROG(test_1, struct bpf_dummy_ops_state *state)
{
@@ -43,8 +46,16 @@ int BPF_PROG(test_2, struct bpf_dummy_ops_state *state, int a1, unsigned short a
return 0;
}

+SEC("struct_ops/test_3")
+int BPF_PROG(test_3, struct bpf_dummy_ops_state *state)
+{
+ bpf_kfunc_call_test_sleepable();
+ return 0;
+}
+
SEC(".struct_ops")
struct bpf_dummy_ops dummy_1 = {
.test_1 = (void *)test_1,
.test_2 = (void *)test_2,
+ .test_3 = (void *)test_3,
};
--
2.39.0