On Wed, May 07, 2014 at 11:01:38AM -0400, Waiman Long wrote:
No, we want the unfair thing for VIRT, not PARAVIRT.
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 9e7659e..10e87e1 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -227,6 +227,14 @@ static __always_inline int get_qlock(struct qspinlock *lock)
 {
 	struct __qspinlock *l = (void *)lock;
 
+#ifdef CONFIG_PARAVIRT_UNFAIR_LOCKS
+	if (static_key_false(&paravirt_unfairlocks_enabled))
+		/*
+		 * Need to use atomic operation to get the lock when
+		 * lock stealing can happen.
+		 */
+		return cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0;
+#endif
 	barrier();
 	ACCESS_ONCE(l->locked) = _Q_LOCKED_VAL;
 	barrier();

That's missing {}.
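FWIW, that open-coded cmpxchg is just the trylock spelled out by hand;
queue_spin_trylock presumably boils down to something like this (a
sketch, assuming the atomic_t val layout used elsewhere in this
series, not the exact code):

static __always_inline int queue_spin_trylock(struct qspinlock *lock)
{
	/*
	 * Read first so the uncontended check doesn't generate a
	 * bus-locked operation when the lock is already taken.
	 */
	if (!atomic_read(&lock->val) &&
	    atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0)
		return 1;
	return 0;
}

which is exactly why there's no reason to repeat it inside get_qlock().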
But no, what you want is:
static __always_inline bool virt_lock(struct qspinlock *lock)
{
#ifdef CONFIG_VIRT_MUCK
	if (static_key_false(&virt_unfairlocks_enabled)) {
		while (!queue_spin_trylock(lock))
			cpu_relax();
		return true;
	}
#endif
	return false;
}
void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	if (virt_lock(lock))
		return;

	...
}
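That keeps all the virt muck in one obvious place; the fastpath stays
untouched and the unfair spin only costs a static key branch on the
slow side. For reference, the fastpath that funnels into the slowpath
would look roughly like this (again a sketch under the same
assumptions as above):

static __always_inline void queue_spin_lock(struct qspinlock *lock)
{
	u32 val;

	/* Uncontended case: 0 -> _Q_LOCKED_VAL and we're done. */
	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;
	queue_spin_lock_slowpath(lock, val);
}

So a guest with the key enabled only ever spins in virt_lock(), while
bare metal pays nothing beyond the jump-label NOP.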