[tip:locking/core] locking/spinlocks: Remove an instruction from spin and write locks

From: tip-bot for Matthew Wilcox
Date: Tue Oct 02 2018 - 06:08:28 EST


Commit-ID: 27df89689e257cccb604fdf56c91a75a25aa554a
Gitweb: https://git.kernel.org/tip/27df89689e257cccb604fdf56c91a75a25aa554a
Author: Matthew Wilcox <willy@xxxxxxxxxxxxx>
AuthorDate: Mon, 20 Aug 2018 10:19:14 -0400
Committer: Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Tue, 2 Oct 2018 09:49:42 +0200

locking/spinlocks: Remove an instruction from spin and write locks

Both spin locks and write locks currently do:

	f0 0f b1 17	lock cmpxchg %edx,(%rdi)
	85 c0		test	%eax,%eax
	75 05		jne	[slowpath]

This 'test' insn is superfluous; the cmpxchg insn sets the Z flag
appropriately. Peter pointed out that using atomic_try_cmpxchg_acquire()
will let the compiler know this is true. Comparing the before/after
disassemblies shows that the only effect is the removal of this insn.
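
To illustrate, here are the two idioms side by side (a sketch based on
the queued_spin_lock() fastpath changed below, not a complete function):

	/* Before: fetch the old value and compare it by hand. */
	u32 val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))	/* compiles to cmpxchg; test; jne */
		return;

	/* After: the boolean result maps directly onto the Z flag set by
	 * cmpxchg, so no separate 'test' is emitted. On failure, the
	 * current lock value is written back into 'val' for the slowpath. */
	u32 val = 0;
	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;		/* compiles to cmpxchg; jne */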

Take this opportunity to make the spin and write lock code resemble each
other more closely, and to give them similar likely() hints.
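
For reference, these hints expand to GCC's __builtin_expect(), as defined
(modulo the branch-tracing variants) in include/linux/compiler.h:

	# define likely(x)	__builtin_expect(!!(x), 1)
	# define unlikely(x)	__builtin_expect(!!(x), 0)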

Suggested-by: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Signed-off-by: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Acked-by: Will Deacon <will.deacon@xxxxxxx>
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Waiman Long <longman@xxxxxxxxxx>
Link: http://lkml.kernel.org/r/20180820162639.GC25153@xxxxxxxxxxxxxxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
 include/asm-generic/qrwlock.h   |  7 ++++---
 include/asm-generic/qspinlock.h | 16 +++++++-------
 2 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 0f7062bd55e5..36254d2da8e0 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -71,8 +71,8 @@ static inline int queued_write_trylock(struct qrwlock *lock)
 	if (unlikely(cnts))
 		return 0;
 
-	return likely(atomic_cmpxchg_acquire(&lock->cnts,
-				cnts, cnts | _QW_LOCKED) == cnts);
+	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
+				_QW_LOCKED));
 }
 /**
  * queued_read_lock - acquire read lock of a queue rwlock
@@ -96,8 +96,9 @@ static inline void queued_read_lock(struct qrwlock *lock)
  */
 static inline void queued_write_lock(struct qrwlock *lock)
 {
+	u32 cnts = 0;
 	/* Optimize for the unfair lock case where the fair flag is 0. */
-	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
+	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
 		return;
 
 	queued_write_lock_slowpath(lock);
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 9cc457597ddf..7541fa707f5b 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -66,10 +66,12 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
  */
 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 {
-	if (!atomic_read(&lock->val) &&
-	    (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
-		return 1;
-	return 0;
+	u32 val = atomic_read(&lock->val);
+
+	if (unlikely(val))
+		return 0;
+
+	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
 }
 
 extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
@@ -80,11 +82,11 @@ extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
  */
 static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
-	u32 val;
+	u32 val = 0;
 
-	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
-	if (likely(val == 0))
+	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
 		return;
+
 	queued_spin_lock_slowpath(lock, val);
 }
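
With the patch applied, the fastpath shown at the top is expected to lose
only its middle instruction (a sketch consistent with the commit message;
exact encodings and jump offsets will vary):

	lock cmpxchg %edx,(%rdi)
	jne [slowpath]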