[PATCH bpf-next v2 12/26] rqspinlock: Add a test-and-set fallback
From: Kumar Kartikeya Dwivedi
Date: Thu Feb 06 2025 - 05:57:45 EST
Include a test-and-set fallback for use when queued spinlock support is not
available. Introduce an rqspinlock type that rqspinlock_t falls back to when
struct qspinlock is absent.
Add ifdef guards so that the qspinlock-based slow path in this file is only
compiled when CONFIG_QUEUED_SPINLOCKS=y. Subsequent patches will add the
remaining logic to fall back to the test-and-set implementation when queued
spinlock support is unavailable on an architecture.
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@xxxxxxxxx>
---
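Not part of the patch, just an illustration of where this is headed: a minimal
sketch of how an entry point might pick between the two slow paths once later
patches wire up the fallback. The res_spin_lock_sketch() name and the exact
fast-path sequence are assumptions for illustration only; the real dispatch is
added by subsequent patches in the series.

static __always_inline int res_spin_lock_sketch(rqspinlock_t *lock, u64 timeout)
{
	int val = 0;

	/* Uncontended fast path: a single 0 -> 1 transition on the lock word. */
	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, 1)))
		return 0;
#ifdef CONFIG_QUEUED_SPINLOCKS
	/* Architectures with qspinlock take the resilient queued slow path. */
	return resilient_queued_spin_lock_slowpath(lock, val, timeout);
#else
	/* Otherwise, fall back to the timed test-and-set loop added below. */
	return resilient_tas_spin_lock(lock, timeout);
#endif
}

In both configurations the fast path can operate on lock->val: struct qspinlock
exposes its lock word as an atomic_t val, and the fallback struct rqspinlock
introduced here overlays atomic_t val with u32 locked in a union.
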
include/asm-generic/rqspinlock.h | 17 +++++++++++++++
kernel/locking/rqspinlock.c      | 37 ++++++++++++++++++++++++++++++++
2 files changed, 54 insertions(+)
diff --git a/include/asm-generic/rqspinlock.h b/include/asm-generic/rqspinlock.h
index c1dbd25287a1..92e53b2aafb9 100644
--- a/include/asm-generic/rqspinlock.h
+++ b/include/asm-generic/rqspinlock.h
@@ -12,11 +12,28 @@
#include <linux/types.h>
#include <vdso/time64.h>
#include <linux/percpu.h>
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#endif
+
+struct rqspinlock {
+	union {
+		atomic_t val;
+		u32 locked;
+	};
+};
struct qspinlock;
+#ifdef CONFIG_QUEUED_SPINLOCKS
typedef struct qspinlock rqspinlock_t;
+#else
+typedef struct rqspinlock rqspinlock_t;
+#endif
+extern int resilient_tas_spin_lock(rqspinlock_t *lock, u64 timeout);
+#ifdef CONFIG_QUEUED_SPINLOCKS
extern int resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val, u64 timeout);
+#endif
/*
* Default timeout for waiting loops is 0.5 seconds
diff --git a/kernel/locking/rqspinlock.c b/kernel/locking/rqspinlock.c
index 42e8a56534b6..ea034e80f855 100644
--- a/kernel/locking/rqspinlock.c
+++ b/kernel/locking/rqspinlock.c
@@ -21,7 +21,9 @@
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
+#ifdef CONFIG_QUEUED_SPINLOCKS
#include <asm/qspinlock.h>
+#endif
#include <trace/events/lock.h>
#include <asm/rqspinlock.h>
#include <linux/timekeeping.h>
@@ -29,8 +31,10 @@
/*
* Include queued spinlock definitions and statistics code
*/
+#ifdef CONFIG_QUEUED_SPINLOCKS
#include "qspinlock.h"
#include "rqspinlock.h"
+#endif
#include "qspinlock_stat.h"
/*
@@ -252,6 +256,37 @@ static noinline int check_timeout(rqspinlock_t *lock, u32 mask,
*/
#define RES_RESET_TIMEOUT(ts) ({ (ts).timeout_end = 0; })
+/*
+ * Provide a test-and-set fallback for cases when queued spin lock support is
+ * absent from the architecture.
+ */
+int __lockfunc resilient_tas_spin_lock(rqspinlock_t *lock, u64 timeout)
+{
+	struct rqspinlock_timeout ts;
+	int val, ret = 0;
+
+	RES_INIT_TIMEOUT(ts, timeout);
+	grab_held_lock_entry(lock);
+retry:
+	val = atomic_read(&lock->val);
+
+	if (val || !atomic_try_cmpxchg(&lock->val, &val, 1)) {
+		if (RES_CHECK_TIMEOUT(ts, ret, ~0u)) {
+			lockevent_inc(rqspinlock_lock_timeout);
+			goto out;
+		}
+		cpu_relax();
+		goto retry;
+	}
+
+	return 0;
+out:
+	release_held_lock_entry();
+	return ret;
+}
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+
/*
* Per-CPU queue node structures; we can never have more than 4 nested
* contexts: task, softirq, hardirq, nmi.
@@ -581,3 +616,5 @@ int __lockfunc resilient_queued_spin_lock_slowpath(rqspinlock_t *lock, u32 val,
return ret;
}
EXPORT_SYMBOL(resilient_queued_spin_lock_slowpath);
+
+#endif /* CONFIG_QUEUED_SPINLOCKS */
--
2.43.5