[PATCH] locking/rtmutex: Provide proper spin_is_contended
From: Zhipeng Shi
Date: Tue Jun 07 2022 - 05:24:28 EST
Commit d89c70356acf ("locking/core: Remove break_lock field when
CONFIG_GENERIC_LOCKBREAK=y") removed the break_lock field, which made
spin_is_contended() depend on the architecture's
arch_spin_is_contended() implementation. However, the RT-spinlock
variant of spin_is_contended() still returns 0 unconditionally.

This prevents cond_resched_lock() from correctly detecting lock
contention on RT Linux. In some scenarios (such as
__purge_vmap_area_lazy() in vmalloc), this can cause large latencies.
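
For context, cond_resched_lock() only drops and reacquires the lock when
spin_needbreak() reports contention (or a reschedule is pending), and
spin_needbreak() is just a thin wrapper around spin_is_contended(). A
simplified sketch of that call chain (illustration only, not the exact
kernel code):

	/* Roughly how the contention check is consumed today: */
	static __always_inline int spin_needbreak(spinlock_t *lock)
	{
	#ifdef CONFIG_PREEMPTION
		return spin_is_contended(lock);	/* always 0 for RT spinlocks */
	#else
		return 0;
	#endif
	}

	int __cond_resched_lock(spinlock_t *lock)
	{
		int ret = 0;

		if (spin_needbreak(lock) || should_resched(PREEMPT_LOCK_OFFSET)) {
			spin_unlock(lock);	/* let a waiter in */
			cpu_relax();
			ret = 1;
			spin_lock(lock);
		}
		return ret;
	}

With spin_is_contended() hard-wired to 0, the contention half of that
condition can never fire on RT, so a long-running loop under the lock
never yields it to a waiter.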
Provide a proper spin_is_contended() implementation for RT-spinlocks by
checking the RT_MUTEX_HAS_WAITERS bit of the underlying rtmutex's owner
field.
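
The check relies on the existing rtmutex owner encoding: bit 0 of
rt_mutex_base::owner is set while waiters are enqueued, and
rt_mutex_owner() masks it back off (see rtmutex_common.h). A sketch of
that convention (illustration only, assuming the current encoding):

	/* Conceptually, the owner pointer carries the waiters bit: */
	lock->owner = (struct task_struct *)
			((unsigned long)current | RT_MUTEX_HAS_WAITERS);

	/* ... and the owner task is recovered by masking it off: */
	owner = (struct task_struct *)
			((unsigned long)READ_ONCE(lock->owner) & ~RT_MUTEX_HAS_WAITERS);

So testing that bit through the spinlock_t's embedded rtmutex is enough
to report contention.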
Signed-off-by: Zhipeng Shi <zhipeng.shi0@xxxxxxxxx>
---
include/linux/rtmutex.h | 2 ++
include/linux/spinlock_rt.h | 13 ++++++++++++-
kernel/locking/rtmutex_common.h | 2 --
3 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 7d049883a08a..cd7ac1785c6a 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -70,6 +70,8 @@ extern void rt_mutex_debug_task_free(struct task_struct *tsk);
static inline void rt_mutex_debug_task_free(struct task_struct *tsk) { }
#endif
+#define RT_MUTEX_HAS_WAITERS 1UL
+
#define rt_mutex_init(mutex) \
do { \
static struct lock_class_key __key; \
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index 835aedaf68ac..54abf2b50494 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -145,7 +145,18 @@ static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
#define spin_trylock_irqsave(lock, flags) \
__cond_lock(lock, __spin_trylock_irqsave(lock, flags))
-#define spin_is_contended(lock) (((void)(lock), 0))
+/**
+ * spin_is_contended - check if the lock is contended
+ * @lock: Pointer to spinlock structure
+ *
+ * Return: 1 if lock is contended, 0 otherwise
+ */
+static inline int spin_is_contended(spinlock_t *lock)
+{
+ unsigned long *p = (unsigned long *) &lock->lock.owner;
+
+ return (READ_ONCE(*p) & RT_MUTEX_HAS_WAITERS);
+}
static inline int spin_is_locked(spinlock_t *lock)
{
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index c47e8361bfb5..70c765a26163 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -131,8 +131,6 @@ static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
pi_tree_entry);
}
-#define RT_MUTEX_HAS_WAITERS 1UL
-
static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
--
2.25.1