[PATCH bpf-next v9 9/9] selftests/bpf: Add test cases for bpf_list_del/add/is_first/is_last/empty

From: Chengkaitao

Date: Sun Mar 29 2026 - 10:08:04 EST


From: Kaitao Cheng <chengkaitao@xxxxxxxxxx>

Extend the refcounted_kptr selftests with coverage for bpf_list_add()
(taking 'prev' both from bpf_list_front() and from bpf_refcount_acquire()),
bpf_list_del() (taking the node both from bpf_rbtree_remove() and from
bpf_refcount_acquire()), bpf_list_empty(), bpf_list_is_first() and
bpf_list_is_last(), and bpf_list_push_back() on a zero-initialized list
head stored in a map value.
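
A minimal sketch of the insertion pattern the new bpf_list_add() tests
exercise (assuming the node_data/head/lock definitions already present in
refcounted_kptr.c; the function name and error codes are illustrative only):

  SEC("tc")
  int list_add_sketch(void *ctx)
  {
      struct bpf_list_node *prev;
      struct node_data *n;

      n = bpf_obj_new(typeof(*n));
      if (!n)
          return -1;

      bpf_spin_lock(&lock);
      prev = bpf_list_front(&head);
      if (prev) {
          /* insert n immediately after the current front node */
          bpf_list_add(&head, &n->l, prev);
      } else {
          /* empty list: fall back to a plain push */
          bpf_list_push_front(&head, &n->l);
      }
      bpf_spin_unlock(&lock);
      return 0;
  }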

To check that locking is enforced for the new kfuncs, the negative tests
also expect the verifier to reject bpf_list_del()/bpf_list_add() calls made
without holding the bpf_spin_lock protecting the list.
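
For reference, the rejected shape looks roughly like the sketch below
(mirroring the negative tests in the diff; the node reference obtained
under the lock is used after the lock is dropped, so the verifier is
expected to reject the program):

  SEC("?tc")
  long list_del_no_lock_sketch(void *ctx)
  {
      struct bpf_list_node *l;
      struct node_data *n;

      bpf_spin_lock(&lock);
      l = bpf_list_front(&head);
      bpf_spin_unlock(&lock);
      if (!l)
          return -1;

      n = container_of(l, struct node_data, l);
      /* lock not held here: expected to be rejected by the verifier */
      l = bpf_list_del(&head, &n->l);
      if (l)
          bpf_obj_drop(container_of(l, struct node_data, l));
      return 0;
  }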

Signed-off-by: Kaitao Cheng <chengkaitao@xxxxxxxxxx>
---
.../testing/selftests/bpf/bpf_experimental.h | 16 +
.../selftests/bpf/progs/refcounted_kptr.c | 311 ++++++++++++++++++
2 files changed, 327 insertions(+)

diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 44466acf8083..5821f0000e1f 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -85,6 +85,22 @@ extern int bpf_list_push_back_impl(struct bpf_list_head *head,
/* Convenience macro to wrap over bpf_list_push_back_impl */
#define bpf_list_push_back(head, node) bpf_list_push_back_impl(head, node, NULL, 0)

+/* Description
+ * Insert 'new' immediately after 'prev' in the BPF linked list rooted at
+ * 'head'. The bpf_spin_lock protecting the list must be held. 'prev' must
+ * already be in that list; 'new' must not be in any list. The 'meta' and
+ * 'off' parameters are rewritten by the verifier, no need for BPF programs
+ * to set them.
+ * Returns
+ * 0 on success; -EINVAL if 'head' is NULL, 'prev' is not in the list rooted
+ * at 'head', or 'new' is already in a list.
+ */
+extern int bpf_list_add_impl(struct bpf_list_head *head, struct bpf_list_node *new,
+ struct bpf_list_node *prev, void *meta, __u64 off) __ksym;
+
+/* Convenience macro to wrap over bpf_list_add_impl */
+#define bpf_list_add(head, new, prev) bpf_list_add_impl(head, new, prev, NULL, 0)
+
/* Description
* Remove the entry at the beginning of the BPF linked list.
* Returns
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
index c847398837cc..e5558994a76d 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -367,6 +367,317 @@ long insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx) \
INSERT_STASH_READ(true, "insert_stash_read: remove from tree");
INSERT_STASH_READ(false, "insert_stash_read: don't remove from tree");

+SEC("tc")
+__description("list_empty_test: list empty before add, non-empty after add")
+__success __retval(0)
+int list_empty_test(void *ctx)
+{
+ struct node_data *node_new;
+
+ bpf_spin_lock(&lock);
+ if (!bpf_list_empty(&head)) {
+ bpf_spin_unlock(&lock);
+ return -1;
+ }
+ bpf_spin_unlock(&lock);
+
+ node_new = bpf_obj_new(typeof(*node_new));
+ if (!node_new)
+ return -2;
+
+ bpf_spin_lock(&lock);
+ bpf_list_push_front(&head, &node_new->l);
+
+ if (bpf_list_empty(&head)) {
+ bpf_spin_unlock(&lock);
+ return -3;
+ }
+ bpf_spin_unlock(&lock);
+ return 0;
+}
+
+static struct node_data *__add_in_list(struct bpf_list_head *head,
+ struct bpf_spin_lock *lock)
+{
+ struct node_data *node_new, *node_ref;
+
+ node_new = bpf_obj_new(typeof(*node_new));
+ if (!node_new)
+ return NULL;
+
+ node_ref = bpf_refcount_acquire(node_new);
+
+ bpf_spin_lock(lock);
+ bpf_list_push_front(head, &node_new->l);
+ bpf_spin_unlock(lock);
+ return node_ref;
+}
+
+SEC("tc")
+__description("list_is_edge_test: is_first on first node, is_last on last node")
+__success __retval(0)
+int list_is_edge_test(void *ctx)
+{
+ struct node_data *node_first, *node_last;
+ int err = 0;
+
+ node_last = __add_in_list(&head, &lock);
+ if (!node_last)
+ return -1;
+
+ node_first = __add_in_list(&head, &lock);
+ if (!node_first) {
+ bpf_obj_drop(node_last);
+ return -2;
+ }
+
+ bpf_spin_lock(&lock);
+ if (!bpf_list_is_first(&head, &node_first->l)) {
+ err = -3;
+ goto fail;
+ }
+ if (!bpf_list_is_last(&head, &node_last->l))
+ err = -4;
+
+fail:
+ bpf_spin_unlock(&lock);
+ bpf_obj_drop(node_first);
+ bpf_obj_drop(node_last);
+ return err;
+}
+
+SEC("tc")
+__description("list_del_test1: del returns removed nodes")
+__success __retval(0)
+int list_del_test1(void *ctx)
+{
+ struct node_data *node_first, *node_last;
+ struct bpf_list_node *bpf_node_first, *bpf_node_last;
+ int err = 0;
+
+ node_last = __add_in_list(&head, &lock);
+ if (!node_last)
+ return -1;
+
+ node_first = __add_in_list(&head, &lock);
+ if (!node_first) {
+ bpf_obj_drop(node_last);
+ return -2;
+ }
+
+ bpf_spin_lock(&lock);
+ bpf_node_last = bpf_list_del(&head, &node_last->l);
+ bpf_node_first = bpf_list_del(&head, &node_first->l);
+ bpf_spin_unlock(&lock);
+
+ if (bpf_node_first)
+ bpf_obj_drop(container_of(bpf_node_first, struct node_data, l));
+ else
+ err = -3;
+
+ if (bpf_node_last)
+ bpf_obj_drop(container_of(bpf_node_last, struct node_data, l));
+ else
+ err = -4;
+
+ bpf_obj_drop(node_first);
+ bpf_obj_drop(node_last);
+ return err;
+}
+
+SEC("tc")
+__description("list_del_test2: remove an arbitrary node from the list")
+__success __retval(0)
+int list_del_test2(void *ctx)
+{
+ struct bpf_rb_node *rb;
+ struct bpf_list_node *l;
+ struct node_data *n;
+ long err;
+
+ err = __insert_in_tree_and_list(&head, &root, &lock);
+ if (err)
+ return err;
+
+ bpf_spin_lock(&lock);
+ rb = bpf_rbtree_first(&root);
+ if (!rb) {
+ bpf_spin_unlock(&lock);
+ return -4;
+ }
+
+ rb = bpf_rbtree_remove(&root, rb);
+ if (!rb) {
+ bpf_spin_unlock(&lock);
+ return -5;
+ }
+
+ n = container_of(rb, struct node_data, r);
+ l = bpf_list_del(&head, &n->l);
+ bpf_spin_unlock(&lock);
+ bpf_obj_drop(n);
+ if (!l)
+ return -6;
+
+ bpf_obj_drop(container_of(l, struct node_data, l));
+ return 0;
+}
+
+SEC("tc")
+__description("list_add_test1: insert new node after prev")
+__success __retval(0)
+int list_add_test1(void *ctx)
+{
+ struct node_data *node_first;
+ struct node_data *new_node;
+ long err = 0;
+
+ node_first = __add_in_list(&head, &lock);
+ if (!node_first)
+ return -1;
+
+ new_node = bpf_obj_new(typeof(*new_node));
+ if (!new_node) {
+ err = -2;
+ goto fail;
+ }
+
+ bpf_spin_lock(&lock);
+ err = bpf_list_add(&head, &new_node->l, &node_first->l);
+ bpf_spin_unlock(&lock);
+ if (err) {
+ err = -3;
+ goto fail;
+ }
+
+fail:
+ bpf_obj_drop(node_first);
+ return err;
+}
+
+SEC("tc")
+__description("list_add_test2: list_add accepts list_front return value as prev")
+__success __retval(0)
+int list_add_test2(void *ctx)
+{
+ struct node_data *new_node, *tmp;
+ struct bpf_list_node *bpf_node;
+ long err = 0;
+
+ tmp = __add_in_list(&head, &lock);
+ if (!tmp)
+ return -1;
+
+ new_node = bpf_obj_new(typeof(*new_node));
+ if (!new_node) {
+ err = -2;
+ goto fail;
+ }
+
+ bpf_spin_lock(&lock);
+ bpf_node = bpf_list_front(&head);
+ if (!bpf_node) {
+ bpf_spin_unlock(&lock);
+ bpf_obj_drop(new_node);
+ err = -3;
+ goto fail;
+ }
+
+ err = bpf_list_add(&head, &new_node->l, bpf_node);
+ bpf_spin_unlock(&lock);
+ if (err) {
+ err = -4;
+ goto fail;
+ }
+
+fail:
+ bpf_obj_drop(tmp);
+ return err;
+}
+
+struct uninit_head_val {
+ struct bpf_spin_lock lock;
+ struct bpf_list_head head __contains(node_data, l);
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct uninit_head_val);
+ __uint(max_entries, 1);
+} uninit_head_map SEC(".maps");
+
+SEC("tc")
+__description("list_push_back_uninit_head: push_back on 0-initialized list head")
+__success __retval(0)
+int list_push_back_uninit_head(void *ctx)
+{
+ struct uninit_head_val *st;
+ struct node_data *node;
+ int ret = -1, key = 0;
+
+ st = bpf_map_lookup_elem(&uninit_head_map, &key);
+ if (!st)
+ return -1;
+
+ node = bpf_obj_new(typeof(*node));
+ if (!node)
+ return -1;
+
+ bpf_spin_lock(&st->lock);
+ ret = bpf_list_push_back(&st->head, &node->l);
+ bpf_spin_unlock(&st->lock);
+
+ return ret;
+}
+
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=32 must be held for bpf_list_head")
+long list_del_without_lock_fail(void *ctx)
+{
+ struct bpf_rb_node *rb;
+ struct bpf_list_node *l;
+ struct node_data *n;
+
+ bpf_spin_lock(&lock);
+ rb = bpf_rbtree_first(&root);
+ bpf_spin_unlock(&lock);
+ if (!rb)
+ return -1;
+
+ n = container_of(rb, struct node_data, r);
+ /* Error case: delete list node without holding lock */
+ l = bpf_list_del(&head, &n->l);
+ if (!l)
+ return -2;
+ bpf_obj_drop(container_of(l, struct node_data, l));
+
+ return 0;
+}
+
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=32 must be held for bpf_list_head")
+long list_add_without_lock_fail(void *ctx)
+{
+ struct bpf_rb_node *rb;
+ struct bpf_list_node *l;
+ struct node_data *n;
+
+ bpf_spin_lock(&lock);
+ rb = bpf_rbtree_first(&root);
+ l = bpf_list_front(&head);
+ bpf_spin_unlock(&lock);
+ if (!rb || !l)
+ return -1;
+
+ n = container_of(l, struct node_data, l);
+ /* Error case: add list node without holding lock */
+ if (bpf_list_add(&head, &n->l, l))
+ return -2;
+
+ return 0;
+}
+
SEC("tc")
__success
long rbtree_refcounted_node_ref_escapes(void *ctx)
--
2.50.1 (Apple Git-155)