[PATCH v3] locking/mutex: remove redundant argument from __mutex_lock_common()

From: Michał Mirosław
Date: Tue Sep 12 2023 - 15:55:19 EST


use_ww_ctx is equivalent to ww_ctx != NULL. The one case where
use_ww_ctx was true but ww_ctx == NULL ended up doing the same
__mutex_add_waiter() call anyway, only reached via
__ww_mutex_add_waiter().

Since __ww_mutex_add_waiter() is now called only with ww_ctx != NULL
(from both the regular and PREEMPT_RT implementations), remove the
ww_ctx == NULL branch there as well.
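
For reference, the non-PREEMPT_RT helper used by the removed branch
reduces to the plain FIFO add when called with pos == NULL (paraphrased
sketch of kernel/locking/ww_mutex.h, not verbatim):

	static inline void
	__ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter,
			struct mutex_waiter *pos)
	{
		struct list_head *p = &lock->wait_list;

		if (pos)			/* pos == NULL in the removed branch */
			p = &pos->list;

		__mutex_add_waiter(lock, waiter, p);	/* same call as the !ww_ctx path */
	}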

The resulting object size differences (gcc-12, `size` output) are minor:

   text    data     bss     dec     hex filename (x86-64)
  22603    4696      16   27315    6ab3 /tmp/before.o
  22593    4696      16   27305    6aa9 /tmp/after.o

   text    data     bss     dec     hex filename (arm)
  13488      56       8   13552    34f0 /tmp/before.o
  13492      56       8   13556    34f4 /tmp/after.o

Signed-off-by: Michał Mirosław <mirq-linux@xxxxxxxxxxxx>
---
v3: extended the commit message with `size` diffs;
    added back the `if (ww_ctx)`-guarded store: the compiler sinks it
    into the following branch anyway, so the `ww_ctx == NULL` case
    avoids an unnecessary store (see the sketch after these notes).
v2: extended commit message to note that PREEMPT_RT does not call
__ww_mutex_add_waiter() with ww_ctx == NULL
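
    A minimal sketch of the resulting waiter-add sequence (simplified,
    error handling and surrounding code elided), illustrating the v3
    note about the guarded store:

	/* guarded store; a compiler can sink it into the else branch below */
	if (ww_ctx)
		waiter.ww_ctx = ww_ctx;

	if (!ww_ctx) {
		/* plain mutex path: FIFO add to the wait list */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
	} else {
		/* ww path: add in stamp order, possibly waking waiters */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
	}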
---
kernel/locking/mutex.c | 15 ++++++---------
kernel/locking/ww_mutex.h | 5 -----
2 files changed, 6 insertions(+), 14 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 4a3c006c41fb..045f7da4e473 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -578,15 +578,12 @@ EXPORT_SYMBOL(ww_mutex_unlock);
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
struct lockdep_map *nest_lock, unsigned long ip,
- struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+ struct ww_acquire_ctx *ww_ctx)
{
struct mutex_waiter waiter;
struct ww_mutex *ww;
int ret;

- if (!use_ww_ctx)
- ww_ctx = NULL;
-
might_sleep();

MUTEX_WARN_ON(lock->magic != lock);
@@ -637,12 +634,12 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas

debug_mutex_lock_common(lock, &waiter);
waiter.task = current;
- if (use_ww_ctx)
+ if (ww_ctx)
waiter.ww_ctx = ww_ctx;

lock_contended(&lock->dep_map, ip);

- if (!use_ww_ctx) {
+ if (!ww_ctx) {
/* add waiting tasks to the end of the waitqueue (FIFO): */
__mutex_add_waiter(lock, &waiter, &lock->wait_list);
} else {
@@ -754,14 +751,14 @@ static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
struct lockdep_map *nest_lock, unsigned long ip)
{
- return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
+ return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
- return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
+ return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx);
}

/**
@@ -841,7 +838,7 @@ mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)

token = io_schedule_prepare();
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
- subclass, NULL, _RET_IP_, NULL, 0);
+ subclass, NULL, _RET_IP_, NULL);
io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
index 3ad2cc4823e5..11acb2efe976 100644
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -493,11 +493,6 @@ __ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
struct MUTEX_WAITER *cur, *pos = NULL;
bool is_wait_die;

- if (!ww_ctx) {
- __ww_waiter_add(lock, waiter, NULL);
- return 0;
- }
-
is_wait_die = ww_ctx->is_wait_die;

/*
--
2.39.2