[PATCH 3/3] sched: Use cpu_dying() to fix balance_push vs hotplug-rollback

From: Peter Zijlstra
Date: Wed Mar 10 2021 - 10:02:58 EST


Use the new cpu_dying() state to simplify and fix the interaction
between balance_push() and CPU hotplug rollback.

Specifically, we currently rely on the sched_cpu_dying() /
sched_cpu_activate() notifiers to terminate balance_push. However, if
cpu_down() fails once we're past sched_cpu_deactivate(), balance_push
should be terminated right at that rollback point instead of staying
in effect until the rollback reaches sched_cpu_activate().
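
For reference, cpu_dying() is introduced in patch 1/3 of this series;
modulo the details there, it is a plain test against the new dying
mask. A minimal sketch, assuming the cpu_dying_mask name from that
patch:

	/* sketch of the patch 1/3 helper; not part of this patch */
	static inline bool cpu_dying(unsigned int cpu)
	{
		return cpumask_test_cpu(cpu, cpu_dying_mask);
	}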

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
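
Note: the rollback case works because patch 1/3 flips the dying bit as
soon as the hotplug direction reverses, in cpuhp_set_state() /
cpuhp_reset_state(). A rough sketch of that logic, with a made-up
helper name purely for illustration:

	/* hypothetical helper; patch 1/3 open-codes this in kernel/cpu.c,
	 * using its set_cpu_dying() accessor */
	static void cpuhp_update_dying(unsigned int cpu, bool bringup)
	{
		/* going down: mark dying; going (back) up: clear it */
		if (cpu_dying(cpu) != !bringup)
			set_cpu_dying(cpu, !bringup);
	}

So when a failed cpu_down() rolls back, cpu_dying() reads false and
balance_push() bails out early; when a failed cpu_up() heads back
down, it reads true and pushing resumes. Note also the sched_init()
change below: every rq now boots with balance_callback pointing at
balance_push_callback and only the boot CPU clears it, so secondary
CPUs keep pushing until their own sched_cpu_activate().
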
kernel/sched/core.c | 33 ++++++++++++++++++++-------------
kernel/sched/sched.h | 1 -
2 files changed, 20 insertions(+), 14 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1811,7 +1811,7 @@ static inline bool is_cpu_allowed(struct
 		return cpu_online(cpu);
 
 	/* Regular kernel threads don't get to stay during offline. */
-	if (cpu_rq(cpu)->balance_push)
+	if (cpu_dying(cpu))
 		return false;
 
 	/* But are allowed during online. */
@@ -7647,12 +7647,19 @@ static void balance_push(struct rq *rq)
 
 	lockdep_assert_held(&rq->lock);
 	SCHED_WARN_ON(rq->cpu != smp_processor_id());
+
 	/*
 	 * Ensure the thing is persistent until balance_push_set(.on = false);
 	 */
 	rq->balance_callback = &balance_push_callback;
 
 	/*
+	 * Only active while going offline.
+	 */
+	if (!cpu_dying(rq->cpu))
+		return;
+
+	/*
 	 * Both the cpu-hotplug and stop task are in this case and are
 	 * required to complete the hotplug process.
 	 *
@@ -7705,7 +7712,6 @@ static void balance_push_set(int cpu, bo
 	struct rq_flags rf;
 
 	rq_lock_irqsave(rq, &rf);
-	rq->balance_push = on;
 	if (on) {
 		WARN_ON_ONCE(rq->balance_callback);
 		rq->balance_callback = &balance_push_callback;
@@ -7830,8 +7836,8 @@ int sched_cpu_activate(unsigned int cpu)
 	struct rq_flags rf;
 
 	/*
-	 * Make sure that when the hotplug state machine does a roll-back
-	 * we clear balance_push. Ideally that would happen earlier...
+	 * Clear the balance_push callback and prepare to schedule
+	 * regular tasks.
 	 */
 	balance_push_set(cpu, false);

@@ -7883,14 +7889,6 @@ int sched_cpu_deactivate(unsigned int cp
 	set_cpu_active(cpu, false);
 
 	/*
-	 * From this point forward, this CPU will refuse to run any task that
-	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
-	 * push those tasks away until this gets cleared, see
-	 * sched_cpu_dying().
-	 */
-	balance_push_set(cpu, true);
-
-	/*
 	 * We've cleared cpu_active_mask / set balance_push, wait for all
 	 * preempt-disabled and RCU users of this state to go away such that
 	 * all new such users will observe it.
@@ -7910,6 +7908,14 @@ int sched_cpu_deactivate(unsigned int cp
 	}
 	rq_unlock_irqrestore(rq, &rf);
 
+	/*
+	 * From this point forward, this CPU will refuse to run any task that
+	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
+	 * push those tasks away until this gets cleared, see
+	 * sched_cpu_dying().
+	 */
+	balance_push_set(cpu, true);
+
 #ifdef CONFIG_SCHED_SMT
 	/*
 	 * When going down, decrement the number of cores with SMT present.
@@ -8206,7 +8212,7 @@ void __init sched_init(void)
 		rq->sd = NULL;
 		rq->rd = NULL;
 		rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
-		rq->balance_callback = NULL;
+		rq->balance_callback = &balance_push_callback;
 		rq->active_balance = 0;
 		rq->next_balance = jiffies;
 		rq->push_cpu = 0;
@@ -8253,6 +8259,7 @@ void __init sched_init(void)
 
 #ifdef CONFIG_SMP
 	idle_thread_set_boot_cpu();
+	balance_push_set(smp_processor_id(), false);
 #endif
 	init_sched_fair_class();

--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -982,7 +982,6 @@ struct rq {
 	unsigned long		cpu_capacity_orig;
 
 	struct callback_head	*balance_callback;
-	unsigned char		balance_push;
 
 	unsigned char		nohz_idle_balance;
 	unsigned char		idle_balance;