[PATCH-queue/locking/core] locking/mutex: Unify yield_to_waiter & waiter_spinning

From: Waiman Long
Date: Thu Aug 18 2016 - 21:27:06 EST


Both waiter_spinning and yield_to_waiter are used for a somewhat similar
purpose. The waiter_spinning flag is used in CONFIG_MUTEX_SPIN_ON_OWNER
to make the optimistic spinner yield to a spinning waiter, whereas
yield_to_waiter is used in !CONFIG_MUTEX_SPIN_ON_OWNER to make a new
incoming mutex locker yield to the sleeping waiter.

This patch unifies these two flags into a single yield_to_waiter
flag that is used in both the CONFIG_MUTEX_SPIN_ON_OWNER and
!CONFIG_MUTEX_SPIN_ON_OWNER code.

Signed-off-by: Waiman Long <Waiman.Long@xxxxxxx>
---
include/linux/mutex.h | 8 ++++++--
kernel/locking/mutex.c | 12 ++++++------
2 files changed, 12 insertions(+), 8 deletions(-)

diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 988c020..2b3dcdb 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -57,8 +57,12 @@ struct mutex {
#endif
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
struct optimistic_spin_queue osq; /* Spinner MCS lock */
- int waiter_spinning;
-#elif defined(CONFIG_SMP)
+#endif
+#ifdef CONFIG_SMP
+ /*
+ * Used by both CONFIG_MUTEX_SPIN_ON_OWNER (depends on CONFIG_SMP) &
+ * !CONFIG_MUTEX_SPIN_ON_OWNER codes.
+ */
int yield_to_waiter;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index c4261fa..7218835 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -55,8 +55,8 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
osq_lock_init(&lock->osq);
- lock->waiter_spinning = false;
-#elif defined(CONFIG_SMP)
+#endif
+#ifdef CONFIG_SMP
lock->yield_to_waiter = false;
#endif

@@ -351,7 +351,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
* Turn on the waiter spinning flag to discourage the spinner
* from getting the lock.
*/
- lock->waiter_spinning = true;
+ lock->yield_to_waiter = true;
}

for (;;) {
@@ -374,11 +374,11 @@ static bool mutex_optimistic_spin(struct mutex *lock,
}

/*
- * For regular opt-spinner, it waits until the waiter_spinning
+ * For regular opt-spinner, it waits until the yield_to_waiter
* flag isn't set. This will ensure forward progress for
* the waiter spinner.
*/
- if (!waiter && READ_ONCE(lock->waiter_spinning)) {
+ if (!waiter && READ_ONCE(lock->yield_to_waiter)) {
if (need_resched())
break;
goto relax;
@@ -430,7 +430,7 @@ relax:
if (!waiter)
osq_unlock(&lock->osq);
else
- lock->waiter_spinning = false;
+ lock->yield_to_waiter = false;
done:
/*
* If we fell out of the spin path because of need_resched(),
--
1.7.1