[patch V3 43/64] locking/ww_mutex: Abstract waiter enqueueing

From: Thomas Gleixner
Date: Thu Aug 05 2021 - 11:44:44 EST


From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>

The upcoming rtmutex-based ww_mutex needs different handling for
enqueueing a waiter. Split the enqueue operation out into a helper
function.
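The new helper takes the insertion point as a waiter pointer rather
than a raw list position: a NULL pos appends at the tail of the wait
list, while a non-NULL pos enqueues ahead of that waiter (illustrated
in the standalone sketch after the diff).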

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>

---
kernel/locking/ww_mutex.h | 19 +++++++++++++------
1 file changed, 13 insertions(+), 6 deletions(-)
---
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -44,6 +44,15 @@ static inline struct mutex_waiter *
 	return w;
 }
 
+static inline void
+__ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
+{
+	struct list_head *p = &lock->wait_list;
+	if (pos)
+		p = &pos->list;
+	__mutex_add_waiter(lock, waiter, p);
+}
+
 /*
  * Wait-Die:
  *   The newer transactions are killed when:
@@ -337,12 +346,11 @@ static inline int
 		      struct mutex *lock,
 		      struct ww_acquire_ctx *ww_ctx)
 {
-	struct mutex_waiter *cur;
-	struct list_head *pos;
+	struct mutex_waiter *cur, *pos = NULL;
 	bool is_wait_die;
 
 	if (!ww_ctx) {
-		__mutex_add_waiter(lock, waiter, &lock->wait_list);
+		__ww_waiter_add(lock, waiter, NULL);
 		return 0;
 	}
 
@@ -355,7 +363,6 @@ static inline int
 	 * never die here, but they are sorted in stamp order and
 	 * may wound the lock holder.
 	 */
-	pos = &lock->wait_list;
 	for (cur = __ww_waiter_last(lock); cur;
 	     cur = __ww_waiter_prev(lock, cur)) {
 
@@ -378,13 +385,13 @@ static inline int
 			break;
 		}
 
-		pos = &cur->list;
+		pos = cur;
 
 		/* Wait-Die: ensure younger waiters die. */
 		__ww_mutex_die(lock, cur, ww_ctx);
 	}
 
-	__mutex_add_waiter(lock, waiter, pos);
+	__ww_waiter_add(lock, waiter, pos);
 
 	/*
 	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
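
For illustration, here is a minimal standalone userspace sketch of the
pattern this patch introduces; it is not kernel code. The list type,
list_init(), waiter_of() and the .id field are simplified stand-ins for
the kernel's struct list_head machinery, but list_add_tail() and
__ww_waiter_add() mirror the semantics above: NULL pos appends at the
tail of the wait list, non-NULL pos enqueues the waiter ahead of pos.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel's list_head / mutex types. */
struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

/* Insert @new before @head -- same semantics as the kernel helper. */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

struct mutex { struct list_head wait_list; };
struct mutex_waiter { struct list_head list; int id; };

/* Hypothetical container_of() replacement for this sketch. */
static struct mutex_waiter *waiter_of(struct list_head *n)
{
	return (struct mutex_waiter *)((char *)n - offsetof(struct mutex_waiter, list));
}

/*
 * The pattern from the patch: the position hint is a waiter pointer,
 * not a raw list position. NULL means "append at the tail of the wait
 * list"; non-NULL means "insert before that waiter".
 */
static void __ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter,
			    struct mutex_waiter *pos)
{
	struct list_head *p = &lock->wait_list;

	if (pos)
		p = &pos->list;
	list_add_tail(&waiter->list, p);
}

int main(void)
{
	struct mutex lock;
	struct mutex_waiter a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct list_head *n;

	list_init(&lock.wait_list);
	__ww_waiter_add(&lock, &a, NULL);	/* tail:     [1]     */
	__ww_waiter_add(&lock, &b, NULL);	/* tail:     [1 2]   */
	__ww_waiter_add(&lock, &c, &b);		/* before 2: [1 3 2] */

	for (n = lock.wait_list.next; n != &lock.wait_list; n = n->next)
		printf("%d ", waiter_of(n)->id);
	printf("\n");			/* prints: 1 3 2 */
	return 0;
}

The design point, per the changelog, is that the stamp-ordering walk in
__ww_mutex_add_waiter() now tracks a waiter rather than a list_head, so
presumably a later rtmutex-based implementation can supply its own
__ww_waiter_add() behind the same signature without exposing its queue
representation to this code.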