[PATCH 1/2] locking/rwbase: Optimize rwbase_read_trylock

From: Davidlohr Bueso
Date: Mon Sep 20 2021 - 01:24:38 EST


Instead of a full barrier around the RMW instruction, micro-optimize
for weakly ordered archs such that we only provide the required
ACQUIRE semantics when taking the read lock. ACQUIRE on a successful
cmpxchg is sufficient to order the reader's critical section, and it
pairs with the _release() in __rwbase_write_unlock().

Signed-off-by: Davidlohr Bueso <dbueso@xxxxxxx>
---
kernel/locking/rwbase_rt.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
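
For illustration only (not part of the patch): a minimal userspace
sketch of the reader fast path being changed, using C11 <stdatomic.h>
in place of the kernel's atomic_t API. The demo_rwb type,
demo_read_trylock() and the sign convention of the counter are
made-up names for this example, not the kernel's:

#include <stdatomic.h>
#include <stdbool.h>

struct demo_rwb {
	/* negative while readers may take the lock, 0 when write locked */
	atomic_int readers;
};

static bool demo_read_trylock(struct demo_rwb *rwb)
{
	int r = atomic_load_explicit(&rwb->readers, memory_order_relaxed);

	while (r < 0) {
		/*
		 * ACQUIRE on success is all the read lock needs: loads
		 * and stores in the critical section cannot be reordered
		 * before the successful cmpxchg. A fully ordered cmpxchg
		 * (memory_order_seq_cst) would additionally order the
		 * accesses preceding it, which costs an extra barrier on
		 * weakly ordered archs such as arm64 or POWER.
		 */
		if (atomic_compare_exchange_weak_explicit(&rwb->readers,
							  &r, r + 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return true;
		/* on failure, r was reloaded; loop re-checks r < 0 */
	}
	return false;
}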

diff --git a/kernel/locking/rwbase_rt.c b/kernel/locking/rwbase_rt.c
index 88191f6e252c..a9034784a5a0 100644
--- a/kernel/locking/rwbase_rt.c
+++ b/kernel/locking/rwbase_rt.c
@@ -59,8 +59,7 @@ static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
 	 * set.
 	 */
 	for (r = atomic_read(&rwb->readers); r < 0;) {
-		/* Fully-ordered if cmpxchg() succeeds, provides ACQUIRE */
-		if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
+		if (likely(atomic_try_cmpxchg_acquire(&rwb->readers, &r, r + 1)))
 			return 1;
 	}
 	return 0;
@@ -183,7 +182,7 @@ static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
 
 	/*
 	 * _release() is needed in case that reader is in fast path, pairing
-	 * with atomic_try_cmpxchg() in rwbase_read_trylock(), provides RELEASE
+	 * with atomic_try_cmpxchg_acquire() in rwbase_read_trylock().
 	 */
 	(void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
 	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
--
2.26.2