[PATCH RFC tip/core/rcu] Avoid resched_cpu() when rescheduling the current CPU
From: Paul E. McKenney
Date: Fri Jul 27 2018 - 11:49:38 EST
Hello, Peter,
It occurred to me that it is wasteful to let resched_cpu() acquire
the target CPU's runqueue lock when doing something like
resched_cpu(smp_processor_id()),
and that it would be better to instead use set_tsk_need_resched(current)
and set_preempt_need_resched().
But is doing so really worthwhile? For that matter, are there some
constraints on the use of those two functions that I am failing to
allow for in the patch below?
Thanx, Paul
------------------------------------------------------------------------
commit e95e2d26fff60af9bb4111a9c17461ecd5e17a7d
Author: Paul E. McKenney <paulmck@xxxxxxxxxxxxxxxxxx>
Date: Thu Jul 26 13:44:00 2018 -0700
rcu: Avoid resched_cpu() when rescheduling the current CPU
The resched_cpu() interface is quite handy, but it does acquire the
specified CPU's runqueue lock, which does not come for free. This
commit therefore substitutes the following when directing resched_cpu()
at the current CPU:
set_tsk_need_resched(current);
set_preempt_need_resched();
Signed-off-by: Paul E. McKenney <paulmck@xxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 595059141c40..061ceb171d8e 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1353,7 +1353,8 @@ static void print_cpu_stall(void)
* progress and it could be we're stuck in kernel space without context
* switches for an entirely unreasonable amount of time.
*/
- resched_cpu(smp_processor_id());
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
}
static void check_cpu_stall(struct rcu_data *rdp)
@@ -2674,10 +2675,12 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
WARN_ON_ONCE(!rdp->beenonline);
/* Report any deferred quiescent states if preemption enabled. */
- if (!(preempt_count() & PREEMPT_MASK))
+ if (!(preempt_count() & PREEMPT_MASK)) {
rcu_preempt_deferred_qs(current);
- else if (rcu_preempt_need_deferred_qs(current))
- resched_cpu(rdp->cpu); /* Provoke future context switch. */
+ } else if (rcu_preempt_need_deferred_qs(current)) {
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
+ }
/* Update RCU state based on any recent quiescent states. */
rcu_check_quiescent_state(rdp);
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index b3e2c873b8e4..62d363d7fab2 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -672,7 +672,8 @@ static void sync_rcu_exp_handler(void *unused)
rcu_report_exp_rdp(rdp);
} else {
rdp->deferred_qs = true;
- resched_cpu(rdp->cpu);
+ set_tsk_need_resched(t);
+ set_preempt_need_resched();
}
return;
}
@@ -710,15 +711,16 @@ static void sync_rcu_exp_handler(void *unused)
* because we are in an interrupt handler, which will cause that
* function to take an early exit without doing anything.
*
- * Otherwise, use resched_cpu() to force a context switch after
- * the CPU enables everything.
+ * Otherwise, force a context switch after the CPU enables everything.
*/
rdp->deferred_qs = true;
if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
- WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs()))
+ WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
rcu_preempt_deferred_qs(t);
- else
- resched_cpu(rdp->cpu);
+ } else {
+ set_tsk_need_resched(t);
+ set_preempt_need_resched();
+ }
}
/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
@@ -779,7 +781,8 @@ static void sync_sched_exp_handler(void *unused)
__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
/* Store .exp before .rcu_urgent_qs. */
smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
- resched_cpu(smp_processor_id());
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
}
/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 5f4c8bab7c72..d3ccf4389a67 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -791,8 +791,10 @@ static void rcu_flavor_check_callbacks(int user)
if (t->rcu_read_lock_nesting > 0 ||
(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
/* No QS, force context switch if deferred. */
- if (rcu_preempt_need_deferred_qs(t))
- resched_cpu(smp_processor_id());
+ if (rcu_preempt_need_deferred_qs(t)) {
+ set_tsk_need_resched(t);
+ set_preempt_need_resched();
+ }
} else if (rcu_preempt_need_deferred_qs(t)) {
rcu_preempt_deferred_qs(t); /* Report deferred QS. */
return;