[PATCH bpf-next v2 25/26] bpf: Maintain FIFO property for rqspinlock unlock
From: Kumar Kartikeya Dwivedi
Date: Thu Feb 06 2025 - 06:01:01 EST
Since out-of-order unlocks are unsupported for rqspinlock, and the irqsave
variants already enforce strict FIFO ordering, apply the same restriction to
the normal non-irqsave variants, so that FIFO ordering is enforced for them
as well.
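
For illustration only (not part of this patch), assuming the
struct bpf_res_spin_lock type and the bpf_res_spin_lock() /
bpf_res_spin_unlock() kfuncs introduced earlier in this series, and two
hypothetical map-value locks lock_a and lock_b:

  if (bpf_res_spin_lock(&lock_a))
  	return 0;
  if (bpf_res_spin_lock(&lock_b)) {
  	bpf_res_spin_unlock(&lock_a);
  	return 0;
  }
  /* Accepted: strictly nested order, topmost lock released first. */
  bpf_res_spin_unlock(&lock_b);
  bpf_res_spin_unlock(&lock_a);

Swapping the last two unlocks is now rejected ("..._unlock cannot be out
of order"), since lock_a is not the most recently acquired lock.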
Two new verifier state fields (active_lock_id, active_lock_ptr) denote the
top of the lock stack, and the previous entry's id and pointer (prev_id,
prev_ptr) are recomputed whenever the topmost entry is popped through an
unlock.
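
A rough user-space sketch of this bookkeeping (toy types and names, not
the verifier's; the verifier recomputes the previous entry by scanning
its reference array rather than keeping a separate stack):

  #include <stdio.h>
  #include <stdint.h>

  struct lock_ref { uint32_t id; void *ptr; };

  static struct lock_ref held[8];
  static int depth;
  static uint32_t active_lock_id;	/* id of the topmost held lock  */
  static void *active_lock_ptr;	/* ptr of the topmost held lock */

  static void acquire(uint32_t id, void *ptr)
  {
  	held[depth++] = (struct lock_ref){ .id = id, .ptr = ptr };
  	active_lock_id = id;
  	active_lock_ptr = ptr;
  }

  static int release(uint32_t id, void *ptr)
  {
  	/* Only the topmost (most recently acquired) lock may be released. */
  	if (id != active_lock_id || ptr != active_lock_ptr)
  		return -1;
  	depth--;
  	active_lock_id = depth ? held[depth - 1].id : 0;
  	active_lock_ptr = depth ? held[depth - 1].ptr : NULL;
  	return 0;
  }

  int main(void)
  {
  	int a, b;

  	acquire(1, &a);
  	acquire(2, &b);
  	printf("%d\n", release(2, &b));	/* 0: b is the topmost lock       */
  	printf("%d\n", release(1, &a));	/* 0: a becomes the new top       */

  	acquire(1, &a);
  	acquire(2, &b);
  	printf("%d\n", release(1, &a));	/* -1: out of order, b still held */
  	return 0;
  }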
Take special care to make these fields part of the state comparison in
refsafe.
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@xxxxxxxxx>
---
 include/linux/bpf_verifier.h |  3 +++
 kernel/bpf/verifier.c        | 33 ++++++++++++++++++++++++++++-----
 2 files changed, 31 insertions(+), 5 deletions(-)

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index ed444e44f524..92cd2289b743 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -269,6 +269,7 @@ struct bpf_reference_state {
 		REF_TYPE_LOCK		= (1 << 3),
 		REF_TYPE_RES_LOCK	= (1 << 4),
 		REF_TYPE_RES_LOCK_IRQ	= (1 << 5),
+		REF_TYPE_LOCK_MASK	= REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
 	} type;
 	/* Track each reference created with a unique id, even if the same
 	 * instruction creates the reference multiple times (eg, via CALL).
@@ -435,6 +436,8 @@ struct bpf_verifier_state {
 	u32 active_locks;
 	u32 active_preempt_locks;
 	u32 active_irq_id;
+	u32 active_lock_id;
+	void *active_lock_ptr;
 	bool active_rcu_lock;
 
 	bool speculative;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 294761dd0072..9cac6ea4f844 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1421,6 +1421,8 @@ static int copy_reference_state(struct bpf_verifier_state *dst, const struct bpf
 	dst->active_preempt_locks = src->active_preempt_locks;
 	dst->active_rcu_lock = src->active_rcu_lock;
 	dst->active_irq_id = src->active_irq_id;
+	dst->active_lock_id = src->active_lock_id;
+	dst->active_lock_ptr = src->active_lock_ptr;
 	return 0;
 }
 
@@ -1520,6 +1522,8 @@ static int acquire_lock_state(struct bpf_verifier_env *env, int insn_idx, enum r
 	s->ptr = ptr;
 
 	state->active_locks++;
+	state->active_lock_id = id;
+	state->active_lock_ptr = ptr;
 	return 0;
 }
 
@@ -1559,16 +1563,24 @@ static void release_reference_state(struct bpf_verifier_state *state, int idx)
 static int release_lock_state(struct bpf_verifier_state *state, int type, int id, void *ptr)
 {
+	void *prev_ptr = NULL;
+	u32 prev_id = 0;
 	int i;
 
 	for (i = 0; i < state->acquired_refs; i++) {
-		if (state->refs[i].type != type)
-			continue;
-		if (state->refs[i].id == id && state->refs[i].ptr == ptr) {
+		if (state->refs[i].type == type && state->refs[i].id == id &&
+		    state->refs[i].ptr == ptr) {
 			release_reference_state(state, i);
 			state->active_locks--;
+			/* Reassign active lock (id, ptr). */
+			state->active_lock_id = prev_id;
+			state->active_lock_ptr = prev_ptr;
 			return 0;
 		}
+		if (state->refs[i].type & REF_TYPE_LOCK_MASK) {
+			prev_id = state->refs[i].id;
+			prev_ptr = state->refs[i].ptr;
+		}
 	}
 	return -EINVAL;
 }
 
@@ -8123,6 +8135,14 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno, int flags)
 			type = REF_TYPE_RES_LOCK;
 		else
 			type = REF_TYPE_LOCK;
+		if (!find_lock_state(cur, type, reg->id, ptr)) {
+			verbose(env, "%s_unlock of different lock\n", lock_str);
+			return -EINVAL;
+		}
+		if (reg->id != cur->active_lock_id || ptr != cur->active_lock_ptr) {
+			verbose(env, "%s_unlock cannot be out of order\n", lock_str);
+			return -EINVAL;
+		}
 		if (release_lock_state(cur, type, reg->id, ptr)) {
 			verbose(env, "%s_unlock of different lock\n", lock_str);
 			return -EINVAL;
@@ -12284,8 +12304,7 @@ static int check_reg_allocation_locked(struct bpf_verifier_env *env, struct bpf_
 	if (!env->cur_state->active_locks)
 		return -EINVAL;
 
-	s = find_lock_state(env->cur_state, REF_TYPE_LOCK | REF_TYPE_RES_LOCK | REF_TYPE_RES_LOCK_IRQ,
-			    id, ptr);
+	s = find_lock_state(env->cur_state, REF_TYPE_LOCK_MASK, id, ptr);
 	if (!s) {
 		verbose(env, "held lock and object are not in the same allocation\n");
 		return -EINVAL;
@@ -18288,6 +18307,10 @@ static bool refsafe(struct bpf_verifier_state *old, struct bpf_verifier_state *c
 	if (!check_ids(old->active_irq_id, cur->active_irq_id, idmap))
 		return false;
 
+	if (!check_ids(old->active_lock_id, cur->active_lock_id, idmap) ||
+	    old->active_lock_ptr != cur->active_lock_ptr)
+		return false;
+
 	for (i = 0; i < old->acquired_refs; i++) {
 		if (!check_ids(old->refs[i].id, cur->refs[i].id, idmap) ||
 		    old->refs[i].type != cur->refs[i].type)
--
2.43.5