[PATCH RT 2/4] sched: migrate_enable: Use sleeping_lock to indicate involuntary sleep
From: Scott Wood
Date: Tue Jun 18 2019 - 21:24:38 EST
Without this, rcu_note_context_switch() will complain (via WARN_ON_ONCE())
if an RCU read lock is held when migrate_enable() calls stop_one_cpu(),
since the resulting context switch looks like a voluntary sleep inside an
RCU read-side critical section.  Bumping the sleeping_lock counter around
the stop_one_cpu() call marks the sleep as involuntary, the same way
blocking on a sleeping lock does, which suppresses the warning.  The
sleeping_lock field and its helpers move from CONFIG_PREEMPT_RT_FULL to
CONFIG_PREEMPT_RT_BASE accordingly.
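
For reference, a sketch of the sleeping_lock accounting this relies on
(the increment side is visible in the include/linux/sched.h hunk below;
the decrement side is assumed to simply mirror it):

  #ifdef CONFIG_PREEMPT_RT_BASE
  /* Mark the current task as being in an involuntary sleep. */
  static inline void sleeping_lock_inc(void)
  {
  	current->sleeping_lock++;
  }

  /* Clear the marker once the involuntary sleep is over. */
  static inline void sleeping_lock_dec(void)
  {
  	current->sleeping_lock--;
  }
  #endif

rcu_note_context_switch() then skips its warning when t->sleeping_lock is
non-zero, treating the context switch as an involuntary sleep rather than
a voluntary schedule inside an RCU read-side critical section.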
Signed-off-by: Scott Wood <swood@xxxxxxxxxx>
---
 include/linux/sched.h    | 4 ++--
 kernel/rcu/tree_plugin.h | 2 +-
 kernel/sched/core.c      | 2 ++
 3 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index e1ea2ea52feb..9b8334c24dad 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -681,7 +681,7 @@ struct task_struct {
 	int				migrate_disable_atomic;
 # endif
 #endif
-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT_BASE
 	int				sleeping_lock;
 #endif
 
@@ -1870,7 +1870,7 @@ static __always_inline bool need_resched(void)
 	return unlikely(tif_need_resched());
 }
 
-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT_BASE
 static inline void sleeping_lock_inc(void)
 {
 	current->sleeping_lock++;
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 4bac3e5ee1ab..5d63914b3687 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -311,7 +311,7 @@ void rcu_note_context_switch(bool preempt)
 	barrier(); /* Avoid RCU read-side critical sections leaking down. */
 	trace_rcu_utilization(TPS("Start context switch"));
 	lockdep_assert_irqs_disabled();
-#if defined(CONFIG_PREEMPT_RT_FULL)
+#if defined(CONFIG_PREEMPT_RT_BASE)
 	sleeping_l = t->sleeping_lock;
 #endif
 	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !sleeping_l);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a8493ff60b67..fce7d574ab5b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7351,7 +7351,9 @@ void migrate_enable(void)
 			unpin_current_cpu();
 			preempt_lazy_enable();
 			preempt_enable();
+			sleeping_lock_inc();
 			stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+			sleeping_lock_dec();
 			tlb_migrate_finish(p->mm);
 			return;
--
1.8.3.1