[PATCH 04/12] locking/rwsem: Make rwsem_spin_on_owner() return owner state

From: Waiman Long
Date: Thu Mar 28 2019 - 14:12:47 EST


This patch modifies rwsem_spin_on_owner() to return one of four possible
values to better reflect the state of the lock holder, which enables us
to make a better decision on what to do next.

In the special case that there is no active lock and the handoff bit
is set, optimistic spinning has to be stopped.
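
For illustration only (not part of the patch), the caller-side decision
reduces to a single mask test because the states are distinct bits; this
mirrors the new loop condition in rwsem_optimistic_spin() below. The
helper name here is made up:

	static inline bool rwsem_keep_spinning(struct rw_semaphore *sem)
	{
		enum owner_state state = rwsem_spin_on_owner(sem);

		/*
		 * Spin only while the owner is NULL or a running writer;
		 * OWNER_READER and OWNER_NONSPINNABLE end the spin.
		 */
		return state & OWNER_SPINNABLE;
	}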

Signed-off-by: Waiman Long <longman@xxxxxxxxxx>
---
kernel/locking/rwsem-xadd.c | 40 ++++++++++++++++++++++++++++++-------
kernel/locking/rwsem.h | 5 +++++
2 files changed, 38 insertions(+), 7 deletions(-)
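
Reviewer note, not part of the patch: the "no active lock + handoff"
special case boils down to the check below, assuming the
RWSEM_COUNT_HANDOFF()/RWSEM_COUNT_LOCKED() helpers introduced earlier in
this series. A free lock whose handoff bit is set has been promised to
the first waiter, so a spinner gives up instead of trying to steal it:

	long count = atomic_long_read(&sem->count);

	/* Lock is free but reserved for the first waiter: stop spinning. */
	if (RWSEM_COUNT_HANDOFF(count) && !RWSEM_COUNT_LOCKED(count))
		return OWNER_NONSPINNABLE;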

diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 4f036bda9063..35891c53338b 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -281,14 +281,30 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
}

/*
- * Return true only if we can still spin on the owner field of the rwsem.
+ * Return one of the following 4 values depending on the lock owner state.
+ * OWNER_NULL : owner is currently NULL
+ * OWNER_WRITER: when owner changes and is a writer
+ * OWNER_READER: when owner changes and the new owner may be a reader.
+ * OWNER_NONSPINNABLE:
+ * when optimistic spinning has to stop because either the
+ * owner stops running, is unknown, or its timeslice has
+ * been used up.
*/
-static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
+enum owner_state {
+ OWNER_NULL = 1 << 0,
+ OWNER_WRITER = 1 << 1,
+ OWNER_READER = 1 << 2,
+ OWNER_NONSPINNABLE = 1 << 3,
+};
+#define OWNER_SPINNABLE (OWNER_NULL | OWNER_WRITER)
+
+static noinline enum owner_state rwsem_spin_on_owner(struct rw_semaphore *sem)
{
struct task_struct *owner = READ_ONCE(sem->owner);
+ long count;

if (!is_rwsem_owner_spinnable(owner))
- return false;
+ return OWNER_NONSPINNABLE;

rcu_read_lock();
while (owner && (READ_ONCE(sem->owner) == owner)) {
@@ -306,7 +322,7 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
*/
if (need_resched() || !owner_on_cpu(owner)) {
rcu_read_unlock();
- return false;
+ return OWNER_NONSPINNABLE;
}

cpu_relax();
@@ -315,9 +331,19 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)

/*
* If there is a new owner or the owner is not set, we continue
- * spinning.
+ * spinning except when there are no active locks and the handoff bit
+ * is set. In this case, we have to stop spinning.
*/
- return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
+ owner = READ_ONCE(sem->owner);
+ if (!is_rwsem_owner_spinnable(owner))
+ return OWNER_NONSPINNABLE;
+ if (owner && !is_rwsem_owner_reader(owner))
+ return OWNER_WRITER;
+
+ count = atomic_long_read(&sem->count);
+ if (RWSEM_COUNT_HANDOFF(count) && !RWSEM_COUNT_LOCKED(count))
+ return OWNER_NONSPINNABLE;
+ return !owner ? OWNER_NULL : OWNER_READER;
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
@@ -340,7 +366,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
* 2) readers own the lock as we can't determine if they are
* actively running or not.
*/
- while (rwsem_spin_on_owner(sem)) {
+ while (rwsem_spin_on_owner(sem) & OWNER_SPINNABLE) {
/*
* Try to acquire the lock
*/
diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h
index 736466237c1d..4278222daaec 100644
--- a/kernel/locking/rwsem.h
+++ b/kernel/locking/rwsem.h
@@ -117,6 +117,11 @@ static inline bool is_rwsem_owner_spinnable(struct task_struct *owner)
return !((unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED);
}

+static inline bool is_rwsem_owner_reader(struct task_struct *owner)
+{
+ return (unsigned long)owner & RWSEM_READER_OWNED;
+}
+
/*
* Return true if rwsem is owned by an anonymous writer or readers.
*/
--
2.18.1