Stop qspinlock.c from including itself, and avoid most of the
preprocessor-based function renaming.
This is mostly done by having the common slowpath code take a 'bool
paravirt' argument and branching on it where the native and paravirt
paths differ.
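
As a rough standalone illustration of the pattern (a userspace sketch,
not the kernel code; all names below are invented for the example), each
entry point passes a compile-time-constant flag, so the compiler can
drop the branch that caller does not use:

/* Illustration only -- a userspace sketch of the 'bool paravirt'
 * dispatch, not the kernel implementation. */
#include <stdbool.h>
#include <stdio.h>

/* Common slowpath: branches on a flag that is constant at each call
 * site, so each wrapper effectively compiles to only one of the paths. */
static inline void common_lock_slowpath(int *lock, bool paravirt)
{
	if (paravirt)
		printf("paravirt path (wait/kick callbacks)\n");
	else
		printf("native path (plain spinning)\n");
	*lock = 1;
}

static void native_lock(int *lock)
{
	common_lock_slowpath(lock, false);	/* constant false: pv branch dropped */
}

static void pv_lock(int *lock)
{
	common_lock_slowpath(lock, true);	/* constant true: native branch dropped */
}

int main(void)
{
	int lock = 0;

	native_lock(&lock);
	pv_lock(&lock);
	return 0;
}
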
Signed-off-by: Nicholas Piggin <npiggin@xxxxxxxxx>
---
kernel/locking/qspinlock.c | 116 ++++++++++++----------------
kernel/locking/qspinlock_paravirt.h | 10 +--
2 files changed, 52 insertions(+), 74 deletions(-)
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 8f2173e22479..b96c58ca51de 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -11,8 +11,6 @@
* Peter Zijlstra <peterz@xxxxxxxxxxxxx>
*/
-#ifndef _GEN_PV_LOCK_SLOWPATH
-
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
@@ -285,35 +283,21 @@ static __always_inline void set_locked(struct qspinlock *lock)
WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
}
-
-/*
- * Generate the native code for queued_spin_unlock_slowpath(); provide NOPs for
- * all the PV callbacks.
- */
-
-static __always_inline void __pv_init_node(struct qnode *node) { }
-static __always_inline void __pv_wait_node(struct qnode *node,
- struct qnode *prev) { }
-static __always_inline void __pv_kick_node(struct qspinlock *lock,
- struct qnode *node) { }
-static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
- struct qnode *node)
- { return 0; }
-
-#define pv_enabled() false
-
-#define pv_init_node __pv_init_node
-#define pv_wait_node __pv_wait_node
-#define pv_kick_node __pv_kick_node
-#define pv_wait_head_or_lock __pv_wait_head_or_lock
-
#ifdef CONFIG_PARAVIRT_SPINLOCKS
-#define queued_spin_lock_slowpath native_queued_spin_lock_slowpath
-#endif
-
-#endif /* _GEN_PV_LOCK_SLOWPATH */
+#include "qspinlock_paravirt.h"
+#else /* CONFIG_PARAVIRT_SPINLOCKS */
+static __always_inline void pv_init_node(struct qnode *node) { }
+static __always_inline void pv_wait_node(struct qnode *node,
+ struct qnode *prev) { }
+static __always_inline void pv_kick_node(struct qspinlock *lock,
+ struct qnode *node) { }
+static __always_inline u32 pv_wait_head_or_lock(struct qspinlock *lock,
+ struct qnode *node)
+ { return 0; }
+static __always_inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock) { BUILD_BUG(); }
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
-static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
+static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, bool paravirt)
{
struct qnode *prev, *next, *node;
u32 val, old, tail;
@@ -338,8 +322,13 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
*/
if (unlikely(idx >= MAX_NODES)) {
lockevent_inc(lock_no_node);
- while (!queued_spin_trylock(lock))
- cpu_relax();
+ if (paravirt) {
+ while (!pv_hybrid_queued_unfair_trylock(lock))
+ cpu_relax();
+ } else {
+ while (!queued_spin_trylock(lock))
+ cpu_relax();
+ }