[PATCH v5 4/6] selftests/bpf: Add test case for bpf_list_add_impl
From: Chengkaitao
Date: Tue Mar 03 2026 - 22:19:30 EST
From: Kaitao Cheng <chengkaitao@xxxxxxxxxx>
Extend the refcounted_kptr test to exercise bpf_list_add: add a second
node after the first one, then remove both nodes with bpf_list_del.
To check that locking is enforced for bpf_list_add as well, also add a
failure test that expects the verifier to reject a bpf_list_add call
made without holding the bpf_spin_lock.
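
For reference, the core pattern the new test exercises looks roughly like
this (a sketch only; 'head', 'lock' and 'node_data' are the objects already
defined in refcounted_kptr.c, 'n' is a node known to be linked in the list
and 'n_1' a freshly allocated node):

	bpf_spin_lock(&lock);
	/* link n_1 in immediately after n; the bpf_spin_lock protecting
	 * head must be held here, otherwise the verifier rejects the load
	 */
	err = bpf_list_add(&head, &n_1->l, &n->l);
	bpf_spin_unlock(&lock);
	if (err) {
		/* -EINVAL: n is not in this list or n_1 is already linked;
		 * remaining references still have to be dropped, as done
		 * in the test below
		 */
		return err;
	}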
Signed-off-by: Kaitao Cheng <chengkaitao@xxxxxxxxxx>
---
.../testing/selftests/bpf/bpf_experimental.h | 16 +++
.../selftests/bpf/progs/refcounted_kptr.c | 122 ++++++++++++++++--
2 files changed, 124 insertions(+), 14 deletions(-)
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index 54ec9d307fdc..fdcc7a054095 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -110,6 +110,22 @@ extern struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) __ksy
extern struct bpf_list_node *bpf_list_del(struct bpf_list_head *head,
struct bpf_list_node *node) __ksym;
+/* Description
+ * Insert 'new' after 'prev' in the BPF linked list with head 'head'.
+ * The bpf_spin_lock protecting the list must be held. 'prev' must already
+ * be in that list; 'new' must not be in any list. The 'meta' and 'off'
+ * parameters are rewritten by the verifier; BPF programs do not need to
+ * set them.
+ * Returns
+ * 0 on success; -EINVAL if 'head' is NULL, 'prev' is not in the list rooted
+ * at 'head', or 'new' is already in a list.
+ */
+extern int bpf_list_add_impl(struct bpf_list_head *head, struct bpf_list_node *new,
+ struct bpf_list_node *prev, void *meta, __u64 off) __ksym;
+
+/* Convenience macro to wrap over bpf_list_add_impl */
+#define bpf_list_add(head, new, prev) bpf_list_add_impl(head, new, prev, NULL, 0)
+
/* Description
* Remove 'node' from rbtree with root 'root'
* Returns
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
index ac7672cfefb8..5a83274e1d26 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -367,18 +367,19 @@ long insert_rbtree_and_stash__del_tree_##rem_tree(void *ctx) \
INSERT_STASH_READ(true, "insert_stash_read: remove from tree");
INSERT_STASH_READ(false, "insert_stash_read: don't remove from tree");
-/* Insert node_data into both rbtree and list, remove from tree, then remove
- * from list via bpf_list_del using the node obtained from the tree.
+/* Insert one node into both tree and list, remove it from the tree, add a
+ * second node after it in the list with bpf_list_add, then remove both
+ * nodes from the list via bpf_list_del.
*/
SEC("tc")
-__description("test_bpf_list_del: remove an arbitrary node from the list")
+__description("test_list_add_del: test bpf_list_add/del")
__success __retval(0)
-long test_bpf_list_del(void *ctx)
+long test_list_add_del(void *ctx)
{
- long err;
+ long err = 0;
struct bpf_rb_node *rb;
- struct bpf_list_node *l;
- struct node_data *n;
+ struct bpf_list_node *l, *l_1;
+ struct node_data *n, *n_1, *m_1;
err = __insert_in_tree_and_list(&head, &root, &lock);
if (err)
@@ -392,20 +393,48 @@ long test_bpf_list_del(void *ctx)
}
rb = bpf_rbtree_remove(&root, rb);
- if (!rb) {
- bpf_spin_unlock(&lock);
+ bpf_spin_unlock(&lock);
+ if (!rb)
return -5;
- }
n = container_of(rb, struct node_data, r);
+ n_1 = bpf_obj_new(typeof(*n_1));
+ if (!n_1) {
+ bpf_obj_drop(n);
+ return -1;
+ }
+ m_1 = bpf_refcount_acquire(n_1);
+ if (!m_1) {
+ bpf_obj_drop(n);
+ bpf_obj_drop(n_1);
+ return -1;
+ }
+
+ bpf_spin_lock(&lock);
+ if (bpf_list_add(&head, &n_1->l, &n->l)) {
+ bpf_spin_unlock(&lock);
+ bpf_obj_drop(n);
+ bpf_obj_drop(m_1);
+ return -8;
+ }
+
l = bpf_list_del(&head, &n->l);
+ l_1 = bpf_list_del(&head, &m_1->l);
bpf_spin_unlock(&lock);
bpf_obj_drop(n);
- if (!l)
- return -6;
- bpf_obj_drop(container_of(l, struct node_data, l));
- return 0;
+ bpf_obj_drop(m_1);
+ if (l)
+ bpf_obj_drop(container_of(l, struct node_data, l));
+ else
+ err = -6;
+
+ if (l_1)
+ bpf_obj_drop(container_of(l_1, struct node_data, l));
+ else
+ err = -6;
+
+ return err;
}
SEC("?tc")
@@ -438,6 +467,71 @@ long list_del_without_lock_fail(void *ctx)
return 0;
}
+SEC("?tc")
+__failure __msg("bpf_spin_lock at off=32 must be held for bpf_list_head")
+long list_add_without_lock_fail(void *ctx)
+{
+ long err = 0;
+ struct bpf_rb_node *rb;
+ struct bpf_list_node *l, *l_1;
+ struct node_data *n, *n_1, *m_1;
+
+ err = __insert_in_tree_and_list(&head, &root, &lock);
+ if (err)
+ return err;
+
+ bpf_spin_lock(&lock);
+ rb = bpf_rbtree_first(&root);
+ if (!rb) {
+ bpf_spin_unlock(&lock);
+ return -4;
+ }
+
+ rb = bpf_rbtree_remove(&root, rb);
+ bpf_spin_unlock(&lock);
+ if (!rb)
+ return -5;
+
+ n = container_of(rb, struct node_data, r);
+ n_1 = bpf_obj_new(typeof(*n_1));
+ if (!n_1) {
+ bpf_obj_drop(n);
+ return -1;
+ }
+ m_1 = bpf_refcount_acquire(n_1);
+ if (!m_1) {
+ bpf_obj_drop(n);
+ bpf_obj_drop(n_1);
+ return -1;
+ }
+
+ /* Intentionally no lock: verifier should reject bpf_list_add without lock */
+ if (bpf_list_add(&head, &n_1->l, &n->l)) {
+ bpf_obj_drop(n);
+ bpf_obj_drop(m_1);
+ return -8;
+ }
+
+ bpf_spin_lock(&lock);
+ l = bpf_list_del(&head, &n->l);
+ l_1 = bpf_list_del(&head, &m_1->l);
+ bpf_spin_unlock(&lock);
+ bpf_obj_drop(n);
+ bpf_obj_drop(m_1);
+
+ if (l)
+ bpf_obj_drop(container_of(l, struct node_data, l));
+ else
+ err = -6;
+
+ if (l_1)
+ bpf_obj_drop(container_of(l_1, struct node_data, l));
+ else
+ err = -6;
+
+ return err;
+}
+
SEC("tc")
__success
long rbtree_refcounted_node_ref_escapes(void *ctx)
--
2.50.1 (Apple Git-155)