[PATCH 13/19] rcu/context-tracking: Move deferred nocb resched to context tracking
From: Frederic Weisbecker
Date: Wed Mar 02 2022 - 10:49:54 EST
To prepare for migrating the RCU eqs accounting code to context tracking,
split the last-resort deferred nocb resched from rcu_user_enter() and
move it into a separate call from context tracking.
Signed-off-by: Frederic Weisbecker <frederic@xxxxxxxxxx>
Cc: Paul E. McKenney <paulmck@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Neeraj Upadhyay <quic_neeraju@xxxxxxxxxxx>
Cc: Uladzislau Rezki <uladzislau.rezki@xxxxxxxx>
Cc: Joel Fernandes <joel@xxxxxxxxxxxxxxxxx>
Cc: Boqun Feng <boqun.feng@xxxxxxxxx>
Cc: Nicolas Saenz Julienne <nsaenz@xxxxxxxxxx>
Cc: Marcelo Tosatti <mtosatti@xxxxxxxxxx>
Cc: Xiongfeng Wang <wangxiongfeng2@xxxxxxxxxx>
Cc: Yu Liao <liaoyu15@xxxxxxxxxx>
Cc: Phil Auld <pauld@xxxxxxxxxx>
Cc: Paul Gortmaker <paul.gortmaker@xxxxxxxxxxxxx>
Cc: Alex Belits <abelits@xxxxxxxxxxx>
---
include/linux/rcutree.h | 6 ++++++
kernel/context_tracking.c | 8 ++++++++
kernel/rcu/tree.c | 15 ++-------------
3 files changed, 16 insertions(+), 13 deletions(-)
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index e05334c4c3d1..6d111a3c0cc0 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -78,4 +78,10 @@ int rcutree_dead_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
void rcu_cpu_starting(unsigned int cpu);
+#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
+void rcu_irq_work_resched(void);
+#else
+static inline void rcu_irq_work_resched(void) { }
+#endif
+
#endif /* __LINUX_RCUTREE_H */
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 155534c409fc..7be7a2044d3a 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -60,6 +60,8 @@ static __always_inline void context_tracking_recursion_exit(void)
*/
void noinstr __ct_user_enter(enum ctx_state state)
{
+ lockdep_assert_irqs_disabled();
+
/* Kernel threads aren't supposed to go to userspace */
WARN_ON_ONCE(!current->mm);
@@ -81,6 +83,12 @@ void noinstr __ct_user_enter(enum ctx_state state)
vtime_user_enter(current);
instrumentation_end();
}
+ /*
+ * Other than generic entry implementation, we may be past the last
+ * rescheduling opportunity in the entry code. Trigger a self IPI
+ * that will fire and reschedule once we resume in user/guest mode.
+ */
+ rcu_irq_work_resched();
rcu_user_enter();
}
/*
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index c2528e65de0c..938537958c27 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -686,7 +686,7 @@ static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
* last resort is to fire a local irq_work that will trigger a reschedule once IRQs
* get re-enabled again.
*/
-noinstr static void rcu_irq_work_resched(void)
+noinstr void rcu_irq_work_resched(void)
{
struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
@@ -702,10 +702,7 @@ noinstr static void rcu_irq_work_resched(void)
}
instrumentation_end();
}
-
-#else
-static inline void rcu_irq_work_resched(void) { }
-#endif
+#endif /* #if !defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK) */
/**
* rcu_user_enter - inform RCU that we are resuming userspace.
@@ -720,14 +717,6 @@ static inline void rcu_irq_work_resched(void) { }
*/
noinstr void rcu_user_enter(void)
{
- lockdep_assert_irqs_disabled();
-
- /*
- * Other than generic entry implementation, we may be past the last
- * rescheduling opportunity in the entry code. Trigger a self IPI
- * that will fire and reschedule once we resume in user/guest mode.
- */
- rcu_irq_work_resched();
rcu_eqs_enter(true);
}
--
2.25.1