[PATCH v4 08/16] locking/rwsem: Make rwsem_spin_on_owner() return owner state
From: Waiman Long
Date: Sat Apr 13 2019 - 13:23:59 EST
This patch modifies rwsem_spin_on_owner() to return four possible
values to better reflect the state of the lock holder, which enables us
to make a better decision of what to do next.
In the special case that there is no active lock and the handoff bit
is set, optimistic spinning has to be stopped.
Signed-off-by: Waiman Long <longman@xxxxxxxxxx>
---
kernel/locking/rwsem.c | 45 +++++++++++++++++++++++++++++++++++-------
1 file changed, 38 insertions(+), 7 deletions(-)
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index aaab546a890d..2d6850c3e77b 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -156,6 +156,11 @@ static inline bool is_rwsem_owner_spinnable(struct task_struct *owner)
return !((unsigned long)owner & RWSEM_ANONYMOUSLY_OWNED);
}
+static inline bool is_rwsem_owner_reader(struct task_struct *owner)
+{
+ return (unsigned long)owner & RWSEM_READER_OWNED;
+}
+
/*
* Return true if rwsem is owned by an anonymous writer or readers.
*/
@@ -466,14 +471,30 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
}
/*
- * Return true only if we can still spin on the owner field of the rwsem.
+ * Return the following 4 values depending on the lock owner state.
+ * OWNER_NULL : owner is currently NULL
+ * OWNER_WRITER: when owner changes and is a writer
+ * OWNER_READER: when owner changes and the new owner may be a reader.
+ * OWNER_NONSPINNABLE:
+ * when optimistic spinning has to stop because either the
+ * owner stops running, is unknown, or its timeslice has
+ * been used up.
*/
-static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
+enum owner_state {
+ OWNER_NULL = 1 << 0,
+ OWNER_WRITER = 1 << 1,
+ OWNER_READER = 1 << 2,
+ OWNER_NONSPINNABLE = 1 << 3,
+};
+#define OWNER_SPINNABLE (OWNER_NULL | OWNER_WRITER)
+
+static noinline enum owner_state rwsem_spin_on_owner(struct rw_semaphore *sem)
{
struct task_struct *owner = READ_ONCE(sem->owner);
+ long count;
if (!is_rwsem_owner_spinnable(owner))
- return false;
+ return OWNER_NONSPINNABLE;
rcu_read_lock();
while (owner && (READ_ONCE(sem->owner) == owner)) {
@@ -491,7 +512,7 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
*/
if (need_resched() || !owner_on_cpu(owner)) {
rcu_read_unlock();
- return false;
+ return OWNER_NONSPINNABLE;
}
cpu_relax();
@@ -500,9 +521,19 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
/*
* If there is a new owner or the owner is not set, we continue
- * spinning.
+ * spinning except when there are no active locks and the handoff bit
+ * is set. In this case, we have to stop spinning.
*/
- return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
+ owner = READ_ONCE(sem->owner);
+ if (!is_rwsem_owner_spinnable(owner))
+ return OWNER_NONSPINNABLE;
+ if (owner && !is_rwsem_owner_reader(owner))
+ return OWNER_WRITER;
+
+ count = atomic_long_read(&sem->count);
+ if (RWSEM_COUNT_HANDOFF(count) && !RWSEM_COUNT_LOCKED(count))
+ return OWNER_NONSPINNABLE;
+ return !owner ? OWNER_NULL : OWNER_READER;
}
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
@@ -525,7 +556,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
* 2) readers own the lock as we can't determine if they are
* actively running or not.
*/
- while (rwsem_spin_on_owner(sem)) {
+ while (rwsem_spin_on_owner(sem) & OWNER_SPINNABLE) {
/*
* Try to acquire the lock
*/
--
2.18.1