Re: [RESEND][PATCH v18 3/8] locking/mutex: Add p->blocked_on wrappers for correctness checks
From: K Prateek Nayak
Date: Tue Jul 08 2025 - 02:44:52 EST
Hello John,
On 7/8/2025 2:13 AM, John Stultz wrote:
> @@ -2177,6 +2178,57 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock);
> __cond_resched_rwlock_write(lock); \
> })
>
> +static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)
> +{
> + WARN_ON_ONCE(!m);
> + /* The task should only be setting itself as blocked */
> + WARN_ON_ONCE(p != current);
> + /* Currently we serialize blocked_on under the mutex::wait_lock */
> + lockdep_assert_held_once(&m->wait_lock);
Sorry I didn't catch this earlier, but building this with PREEMPT_RT fails
with the following error:
./include/linux/sched.h: In function ‘__set_task_blocked_on’:
./include/linux/sched.h:2187:36: error: ‘struct mutex’ has no member named ‘wait_lock’
2187 | lockdep_assert_held_once(&m->wait_lock);
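This happens because under PREEMPT_RT "struct mutex" is the rtmutex-based
variant, which has no wait_lock member of its own (roughly, from
include/linux/mutex.h, quoting from memory so please double-check):

struct mutex {
	struct rt_mutex_base	rtmutex;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

The wait_lock in that case lives inside the rt_mutex_base.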
Putting all these helpers behind an "#ifndef CONFIG_PREEMPT_RT" will then
lead to this error:
kernel/locking/ww_mutex.h:292:17: error: implicit declaration of function ‘__clear_task_blocked_on’ [-Werror=implicit-function-declaration]
292 | __clear_task_blocked_on(waiter->task, lock);
Putting the "__clear_task_blocked_on()" calls in kernel/locking/ww_mutex.h
behind "#ifndef WW_RT" (which should start from Patch 2 since MUTEX and
MUTEX_WAITER for ww_mutex will resolve to rt_mutex and rt_mutex_waiter in
presence of WW_RT) solves all build issues with PREEMPT_RT. I'll let others
comment on the correctness tho :)
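For reference, that type mapping sits at the top of kernel/locking/ww_mutex.h
(abridged, from memory):

#ifndef WW_RT
#define MUTEX		mutex
#define MUTEX_WAITER	mutex_waiter
#else
#define MUTEX		rt_mutex
#define MUTEX_WAITER	rt_mutex_waiter
#endif

so under WW_RT the lock passed to __clear_task_blocked_on() ends up being a
"struct rt_mutex *" rather than a "struct mutex *".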
Following is the diff I used on top of this series for reference:
(build and boot tested only)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1d7f625adbb5..d47c9e4edf4f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2178,6 +2178,8 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock);
__cond_resched_rwlock_write(lock); \
})
+#ifndef CONFIG_PREEMPT_RT
+
static inline void __set_task_blocked_on(struct task_struct *p, struct mutex *m)
{
WARN_ON_ONCE(!m);
@@ -2229,6 +2231,8 @@ static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
return m;
}
+#endif
+
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
index 086fd5487ca7..fd67a4a49892 100644
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -283,13 +283,14 @@ __ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
if (waiter->ww_ctx->acquired > 0 && __ww_ctx_less(waiter->ww_ctx, ww_ctx)) {
#ifndef WW_RT
debug_mutex_wake_waiter(lock, waiter);
-#endif
+
/*
* When waking up the task to die, be sure to clear the
* blocked_on pointer. Otherwise we can see circular
* blocked_on relationships that can't resolve.
*/
__clear_task_blocked_on(waiter->task, lock);
+#endif
wake_q_add(wake_q, waiter->task);
}
@@ -338,12 +339,14 @@ static bool __ww_mutex_wound(struct MUTEX *lock,
* wakeup pending to re-read the wounded state.
*/
if (owner != current) {
+#ifndef WW_RT
/*
* When waking up the task to wound, be sure to clear the
* blocked_on pointer. Otherwise we can see circular
* blocked_on relationships that can't resolve.
*/
__clear_task_blocked_on(owner, lock);
+#endif
wake_q_add(wake_q, owner);
}
return true;
---
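Another (completely untested) thought: instead of guarding the call sites,
the "#ifndef CONFIG_PREEMPT_RT" block in sched.h could grow an #else branch
with a no-op stub, keeping ww_mutex.h untouched. It would have to be a macro
rather than a static inline, since under WW_RT the lock argument is a
"struct rt_mutex *" and would not match the mutex-based prototype:

#else /* CONFIG_PREEMPT_RT */

/* blocked_on is only tracked for mutex-based locks; make this a no-op on RT */
# define __clear_task_blocked_on(p, m)	do { } while (0)

#endif /* CONFIG_PREEMPT_RT */

As far as I can tell only __clear_task_blocked_on() would need a stub, since
the two calls above are the only ones shared with the WW_RT build. The macro
silently discarding its arguments is a bit ugly though, so the explicit
"#ifndef WW_RT" guards may well be the cleaner option.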
I'll make sure to give the PREEMPT_RT build a spin next time around.
Sorry for the oversight.
> + /*
> + * Check to ensure we don't overwrite an existing mutex value
> + * with a different mutex. Note, setting it to the same
> + * lock repeatedly is ok.
> + */
> + WARN_ON_ONCE(p->blocked_on && p->blocked_on != m);
> + p->blocked_on = m;
> +}
> +
> +static inline void set_task_blocked_on(struct task_struct *p, struct mutex *m)
> +{
> + guard(raw_spinlock_irqsave)(&m->wait_lock);
> + __set_task_blocked_on(p, m);
> +}
> +
> +static inline void __clear_task_blocked_on(struct task_struct *p, struct mutex *m)
> +{
> + WARN_ON_ONCE(!m);
> + /* Currently we serialize blocked_on under the mutex::wait_lock */
> + lockdep_assert_held_once(&m->wait_lock);
> + /*
> + * There may be cases where we re-clear already cleared
> + * blocked_on relationships, but make sure we are not
> + * clearing the relationship with a different lock.
> + */
> + WARN_ON_ONCE(m && p->blocked_on && p->blocked_on != m);
> + p->blocked_on = NULL;
> +}
> +
> +static inline void clear_task_blocked_on(struct task_struct *p, struct mutex *m)
> +{
> + guard(raw_spinlock_irqsave)(&m->wait_lock);
> + __clear_task_blocked_on(p, m);
> +}
> +
> +static inline struct mutex *__get_task_blocked_on(struct task_struct *p)
> +{
> + struct mutex *m = p->blocked_on;
> +
> + if (m)
> + lockdep_assert_held_once(&m->wait_lock);
> + return m;
> +}
> +
> static __always_inline bool need_resched(void)
> {
> return unlikely(tif_need_resched());
--
Thanks and Regards,
Prateek