[patch 11/50] locking/rtmutex: Provide lockdep-less variants of rtmutex interfaces

From: Thomas Gleixner
Date: Tue Jul 13 2021 - 12:13:34 EST


From: Thomas Gleixner <tglx@xxxxxxxxxxxxx>

The existing rt_mutex_*() functions are used by code which uses rtmutex
directly. These interfaces contain rtmutex-specific lockdep operations.

The inner code can be reused for lock implementations which build on top of
rtmutexes, i.e. the lock substitutions for RT-enabled kernels. But as these
are different lock types, they have their own lockdep operations. Calling
the existing rtmutex interfaces for those would cause double lockdep checks
and longer lock chains for no added value.

Provide rwsem_rt_mutex_lock_state(), rwsem_rt_mutex_trylock() and
rwsem_rt_mutex_unlock(), which do not perform any lockdep operations on
the rtmutex itself. The caller has to do them on the lock type which
embeds the rtmutex.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
 kernel/locking/rtmutex_api.c    | 50 ++++++++++++++++++++++++++++++++++++++++
 kernel/locking/rtmutex_common.h |  3 ++
 2 files changed, 53 insertions(+)
---
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -464,4 +464,54 @@ int __sched rwsem_rt_mutex_slowlock_lock
{
return __rt_mutex_slowlock_locked(lock, state);
}
+
+/**
+ * rwsem_rt_mutex_lock_state - Lock a rt_mutex with a given state
+ * @lock: The rt_mutex to be locked
+ * @state: The state to set when blocking on the rt_mutex
+ *
+ * This function does not perform any lockdep operations on @lock. The
+ * lockdep annotations have to be done by the caller on the lock type
+ * which embeds the rtmutex; otherwise lockdep tracks the lock twice.
+ */
+int __sched rwsem_rt_mutex_lock_state(struct rt_mutex *lock, unsigned int state)
+{
+ return __rt_mutex_lock(lock, state);
+}
+
+/**
+ * rwsem_rt_mutex_trylock - Try to lock a rt_mutex
+ * @lock: The rt_mutex to be locked
+ *
+ * This function does not perform any lockdep operations on @lock. The
+ * lockdep annotations have to be done by the caller on the lock type
+ * which embeds the rtmutex; otherwise lockdep tracks the lock twice.
+ */
+int __sched rwsem_rt_mutex_trylock(struct rt_mutex *lock)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES) &&
+	    WARN_ON_ONCE(in_nmi() || in_hardirq()))
+ return 0;
+
+ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+ return 1;
+
+ return rt_mutex_slowtrylock(lock);
+}
+
+/**
+ * rwsem_rt_mutex_unlock - Unlock a rt_mutex
+ * @lock: The rt_mutex to be unlocked
+ *
+ * This function does not perform any lockdep operations on @lock. The
+ * lockdep annotations have to be done by the caller on the lock type
+ * which embeds the rtmutex; otherwise lockdep tracks the lock twice.
+ */
+void __sched rwsem_rt_mutex_unlock(struct rt_mutex *lock)
+{
+ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
+ return;
+
+ rt_mutex_slowunlock(lock);
+}
#endif
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -66,6 +66,9 @@ extern void rt_mutex_postunlock(struct w

/* Special interfaces for RT lock substitutions */
int rwsem_rt_mutex_slowlock_locked(struct rt_mutex *lock, unsigned int state);
+int rwsem_rt_mutex_lock_state(struct rt_mutex *lock, unsigned int state);
+int rwsem_rt_mutex_trylock(struct rt_mutex *lock);
+void rwsem_rt_mutex_unlock(struct rt_mutex *lock);

/*
* Must be guarded because this header is included from rcu/tree_plugin.h