Re: [PATCH RFC 3/3] locking: Wire up contended_release tracepoint

From: Steven Rostedt

Date: Thu Mar 05 2026 - 11:01:42 EST


On Wed, 4 Mar 2026 16:56:17 +0000
Dmitry Ilvokhin <d@xxxxxxxxxxxx> wrote:

> @@ -204,6 +206,8 @@ static inline void rwbase_write_unlock(struct rwbase_rt *rwb)
> unsigned long flags;
>
> raw_spin_lock_irqsave(&rtm->wait_lock, flags);
> + if (rt_mutex_has_waiters(rtm))
> + trace_contended_release(rwb);

Hmm, if statements should never be used just for tracepoints without a
static branch. The above should be:

if (trace_contended_release_enabled() && rt_mutex_has_waiters(rtm))
trace_contended_release(rwb);

The above "trace_contended_release_enabled()" is a static_branch where it
turns the if statement into a nop when the tracepoint is not enabled, and a
jmp when it is.


> __rwbase_write_unlock(rwb, WRITER_BIAS, flags);
> }
>
> @@ -213,6 +217,8 @@ static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
> unsigned long flags;
>
> raw_spin_lock_irqsave(&rtm->wait_lock, flags);
> + if (rt_mutex_has_waiters(rtm))
> + trace_contended_release(rwb);

Same here.

-- Steve

> /* Release it and account current as reader */
> __rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
> }
> diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
> index 24df4d98f7d2..4e61dc0bb045 100644
> --- a/kernel/locking/rwsem.c
> +++ b/kernel/locking/rwsem.c
> @@ -1360,6 +1360,7 @@ static inline void __up_read(struct rw_semaphore *sem)
> if (unlikely((tmp & (RWSEM_LOCK_MASK|RWSEM_FLAG_WAITERS)) ==
> RWSEM_FLAG_WAITERS)) {
> clear_nonspinnable(sem);
> + trace_contended_release(sem);
> rwsem_wake(sem);
> }
> preempt_enable();
> @@ -1383,8 +1384,10 @@ static inline void __up_write(struct rw_semaphore *sem)
> preempt_disable();
> rwsem_clear_owner(sem);
> tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
> - if (unlikely(tmp & RWSEM_FLAG_WAITERS))
> + if (unlikely(tmp & RWSEM_FLAG_WAITERS)) {
> + trace_contended_release(sem);
> rwsem_wake(sem);
> + }
> preempt_enable();
> }
>
> @@ -1407,8 +1410,10 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
> tmp = atomic_long_fetch_add_release(
> -RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
> rwsem_set_reader_owned(sem);
> - if (tmp & RWSEM_FLAG_WAITERS)
> + if (tmp & RWSEM_FLAG_WAITERS) {
> + trace_contended_release(sem);
> rwsem_downgrade_wake(sem);
> + }
> preempt_enable();
> }
>
> diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c
> index 3ef032e22f7e..3cef5ba88f7e 100644
> --- a/kernel/locking/semaphore.c
> +++ b/kernel/locking/semaphore.c
> @@ -231,8 +231,10 @@ void __sched up(struct semaphore *sem)
> else
> __up(sem, &wake_q);
> raw_spin_unlock_irqrestore(&sem->lock, flags);
> - if (!wake_q_empty(&wake_q))
> + if (!wake_q_empty(&wake_q)) {
> + trace_contended_release(sem);
> wake_up_q(&wake_q);
> + }
> }
> EXPORT_SYMBOL(up);
>