[rt-patch 2/3] sched: Introduce raw_cond_resched_lock()
From: Mike Galbraith
Date: Sat Jul 28 2018 - 05:07:54 EST
Add raw_cond_resched_lock() infrastructure: a raw_spinlock_t counterpart of cond_resched_lock(), along with the raw_spin_needbreak() helper it uses to detect lock contention.
Signed-off-by: Mike Galbraith <efault@xxxxxx>
---
include/linux/sched.h | 15 +++++++++++++++
kernel/sched/core.c | 20 ++++++++++++++++++++
2 files changed, 35 insertions(+)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1779,12 +1779,18 @@ static inline int _cond_resched(void) {
})
extern int __cond_resched_lock(spinlock_t *lock);
+extern int __raw_cond_resched_lock(raw_spinlock_t *lock);	/* defined in kernel/sched/core.c */
#define cond_resched_lock(lock) ({ \
	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
	__cond_resched_lock(lock); \
})
+#define raw_cond_resched_lock(lock) ({ /* cond_resched_lock() for a raw_spinlock_t */ \
+	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
+	__raw_cond_resched_lock(lock); \
+})
+
#ifndef CONFIG_PREEMPT_RT_FULL
extern int __cond_resched_softirq(void);
@@ -1817,6 +1823,15 @@ static inline int spin_needbreak(spinloc
#else
return 0;
#endif
+}
+
+static inline int raw_spin_needbreak(raw_spinlock_t *lock)	/* raw counterpart of spin_needbreak() above */
+{
+#ifdef CONFIG_PREEMPT
+	return raw_spin_is_contended(lock);	/* preemptible kernel: worth dropping the lock for a waiter */
+#else
+	return 0;	/* !CONFIG_PREEMPT: never break the critical section */
+#endif
}
static __always_inline bool need_resched(void)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5065,6 +5065,26 @@ int __cond_resched_lock(spinlock_t *lock
}
EXPORT_SYMBOL(__cond_resched_lock);
+int __raw_cond_resched_lock(raw_spinlock_t *lock)	/* raw_spinlock_t counterpart of __cond_resched_lock() */
+{
+	int resched = should_resched(PREEMPT_LOCK_OFFSET);	/* sample once, before @lock is dropped */
+	int ret = 0;
+
+	lockdep_assert_held(lock);	/* caller must hold @lock */
+
+	if (raw_spin_needbreak(lock) || resched) {
+		raw_spin_unlock(lock);
+		if (resched)
+			preempt_schedule_common();	/* reschedule was pending: run it now */
+		else
+			cpu_relax();	/* merely contended: give the waiter a window */
+		ret = 1;	/* tell the caller the lock was dropped */
+		raw_spin_lock(lock);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(__raw_cond_resched_lock);
+
#ifndef CONFIG_PREEMPT_RT_FULL
int __sched __cond_resched_softirq(void)
{