[PATCH v3 next 3/5] Use node->prev_cpu instead of saving node->prev
From: david.laight.linux
Date: Fri Mar 06 2026 - 17:53:43 EST
From: David Laight <david.laight.linux@xxxxxxxxx>
node->prev is only used to update 'prev' in the unlikely case
of concurrent unqueues.
The new 'prev' pointer can instead be obtained by decoding
node->prev_cpu.
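As a minimal sketch of what this looks like in the unqueue retry
loop (it mirrors the hunk further down, nothing beyond it):

	/* Re-read the encoded CPU number that a concurrent unqueue
	 * may have written, then decode it back to a node pointer. */
	prev_cpu = READ_ONCE(node->prev_cpu);
	prev = decode_cpu(prev_cpu);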
node->cpu (or, more precisely, prev->cpu) is only used for the
osq_wait_next() call in the unqueue path.
Normally this is exactly the value that the initial xchg() read
from lock->tail (and which was used to obtain 'prev'), but it can
be updated by concurrent unqueues.
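For illustration (again mirroring the hunk below), the unqueue path
can pass the encoded CPU number straight to osq_wait_next() instead
of dereferencing prev->cpu:

	/* prev_cpu already holds the encoded CPU # + 1 value, so hand
	 * it to osq_wait_next() directly instead of reading prev->cpu. */
	next = osq_wait_next(lock, node, prev_cpu);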
Both the 'prev' and 'cpu' members of optimistic_spin_node are
now unused and can be deleted.
Signed-off-by: David Laight <david.laight.linux@xxxxxxxxx>
---
kernel/locking/osq_lock.c | 31 ++++++++++++++-----------------
1 file changed, 14 insertions(+), 17 deletions(-)
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 0e1c7d11b6c0..5dd7e08d4fda 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -13,9 +13,8 @@
*/
struct optimistic_spin_node {
- struct optimistic_spin_node *next, *prev;
+ struct optimistic_spin_node *next;
int locked; /* 1 if lock acquired */
- int cpu; /* encoded CPU # + 1 value */
int prev_cpu; /* encoded CPU # + 1 value */
};
@@ -96,10 +95,9 @@ bool osq_lock(struct optimistic_spin_queue *lock)
struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
struct optimistic_spin_node *prev, *next;
int curr = encode_cpu(smp_processor_id());
- int old;
+ int prev_cpu;
node->next = NULL;
- node->cpu = curr;
/*
* We need both ACQUIRE (pairs with corresponding RELEASE in
@@ -107,23 +105,22 @@ bool osq_lock(struct optimistic_spin_queue *lock)
* the node fields we just initialised) semantics when updating
* the lock tail.
*/
- old = atomic_xchg(&lock->tail, curr);
- if (old == OSQ_UNLOCKED_VAL)
+ prev_cpu = atomic_xchg(&lock->tail, curr);
+ if (prev_cpu == OSQ_UNLOCKED_VAL)
return true;
- WRITE_ONCE(node->prev_cpu, old);
- prev = decode_cpu(old);
- node->prev = prev;
+ WRITE_ONCE(node->prev_cpu, prev_cpu);
+ prev = decode_cpu(prev_cpu);
node->locked = 0;
/*
* osq_lock() unqueue
*
- * node->prev = prev osq_wait_next()
+ * node->prev_cpu = prev_cpu osq_wait_next()
* WMB MB
- * prev->next = node next->prev = prev // unqueue-C
+ * prev->next = node next->prev_cpu = prev_cpu // unqueue-C
*
- * Here 'node->prev' and 'next->prev' are the same variable and we need
+ * Here 'node->prev_cpu' and 'next->prev_cpu' are the same variable and we need
* to ensure these stores happen in-order to avoid corrupting the list.
*/
smp_wmb();
@@ -179,9 +176,10 @@ bool osq_lock(struct optimistic_spin_queue *lock)
/*
* Or we race against a concurrent unqueue()'s step-B, in which
- * case its step-C will write us a new @node->prev pointer.
+ * case its step-C will write us a new @node->prev_cpu value.
*/
- prev = READ_ONCE(node->prev);
+ prev_cpu = READ_ONCE(node->prev_cpu);
+ prev = decode_cpu(prev_cpu);
}
/*
@@ -191,7 +189,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
* back to @prev.
*/
- next = osq_wait_next(lock, node, prev->cpu);
+ next = osq_wait_next(lock, node, prev_cpu);
if (!next)
return false;
@@ -203,8 +201,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
* it will wait in Step-A.
*/
- WRITE_ONCE(next->prev_cpu, prev->cpu);
- WRITE_ONCE(next->prev, prev);
+ WRITE_ONCE(next->prev_cpu, prev_cpu);
WRITE_ONCE(prev->next, next);
return false;
--
2.39.5