[PATCH v23 7/9] sched: Have try_to_wake_up() handle return-migration for PROXY_WAKING case
From: John Stultz
Date: Wed Oct 29 2025 - 20:19:13 EST
Add logic so try_to_wake_up() notices when we are waking a task
whose blocked_on == PROXY_WAKING, and if necessary dequeues the
task so the wakeup naturally return-migrates the donor task back
to a cpu it can run on.
This helps performance, as we do the dequeue and wakeup under the
locks already taken in try_to_wake_up(), and avoids having to do
proxy_force_return() from __schedule(), which has to re-take
similar locks and then force another pass through the pick-again
loop.
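
Roughly, the new path through try_to_wake_up() looks like the
sketch below (a simplified flow, not the literal code; locking
and error handling elided):

  try_to_wake_up(p)
    ttwu_runnable(p)
      proxy_needs_return(rq, p)
        /* p is PROXY_WAKING and was proxy-migrated away from
         * p->wake_cpu, so deactivate_task() it and clear
         * PROXY_WAKING */
    select_task_rq(p)    /* picks a cpu p can actually run on */
    ttwu_queue(p, cpu)   /* activates p there, completing the
                            return-migration */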
This was split out from the larger proxy patch and significantly
reworked.
Credits for the original patch go to:
Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Juri Lelli <juri.lelli@xxxxxxxxxx>
Valentin Schneider <valentin.schneider@xxxxxxx>
Connor O'Brien <connoro@xxxxxxxxxx>
Signed-off-by: John Stultz <jstultz@xxxxxxxxxx>
---
Cc: Joel Fernandes <joelagnelf@xxxxxxxxxx>
Cc: Qais Yousef <qyousef@xxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Juri Lelli <juri.lelli@xxxxxxxxxx>
Cc: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
Cc: Dietmar Eggemann <dietmar.eggemann@xxxxxxx>
Cc: Valentin Schneider <vschneid@xxxxxxxxxx>
Cc: Steven Rostedt <rostedt@xxxxxxxxxxx>
Cc: Ben Segall <bsegall@xxxxxxxxxx>
Cc: Zimuzo Ezeozue <zezeozue@xxxxxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Waiman Long <longman@xxxxxxxxxx>
Cc: Boqun Feng <boqun.feng@xxxxxxxxx>
Cc: "Paul E. McKenney" <paulmck@xxxxxxxxxx>
Cc: Metin Kaya <Metin.Kaya@xxxxxxx>
Cc: Xuewen Yan <xuewen.yan94@xxxxxxxxx>
Cc: K Prateek Nayak <kprateek.nayak@xxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Daniel Lezcano <daniel.lezcano@xxxxxxxxxx>
Cc: Suleiman Souhlal <suleiman@xxxxxxxxxx>
Cc: kuyo chang <kuyo.chang@xxxxxxxxxxxx>
Cc: hupu <hupu.gm@xxxxxxxxx>
Cc: kernel-team@xxxxxxxxxxx
---
kernel/sched/core.c | 74 +++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 72 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3cf5e75abf21e..4546ceb8eae56 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3697,6 +3697,56 @@ static inline void proxy_set_task_cpu(struct task_struct *p, int cpu)
__set_task_cpu(p, cpu);
p->wake_cpu = wake_cpu;
}
+
+static bool proxy_task_runnable_but_waking(struct task_struct *p)
+{
+ if (!sched_proxy_exec())
+ return false;
+ return (READ_ONCE(p->__state) == TASK_RUNNING &&
+ READ_ONCE(p->blocked_on) == PROXY_WAKING);
+}
+
+static inline struct task_struct *proxy_resched_idle(struct rq *rq);
+
+/*
+ * Checks whether task p has been proxy-migrated to another rq
+ * and needs to be returned. If so, we deactivate the task here
+ * so that it can be properly woken up on p->wake_cpu (or
+ * whichever cpu select_task_rq() picks at the bottom of
+ * try_to_wake_up()).
+ */
+static inline bool proxy_needs_return(struct rq *rq, struct task_struct *p)
+{
+ bool ret = false;
+
+ if (!sched_proxy_exec())
+ return false;
+
+ raw_spin_lock(&p->blocked_lock);
+ if (p->blocked_on == PROXY_WAKING) {
+ if (!task_current(rq, p) && p->wake_cpu != cpu_of(rq)) {
+ if (task_current_donor(rq, p))
+ proxy_resched_idle(rq);
+
+ deactivate_task(rq, p, DEQUEUE_NOCLOCK);
+ ret = true;
+ }
+ __clear_task_blocked_on(p, PROXY_WAKING);
+ resched_curr(rq);
+ }
+ raw_spin_unlock(&p->blocked_lock);
+ return ret;
+}
+#else /* !CONFIG_SCHED_PROXY_EXEC */
+static bool proxy_task_runnable_but_waking(struct task_struct *p)
+{
+ return false;
+}
+
+static inline bool proxy_needs_return(struct rq *rq, struct task_struct *p)
+{
+ return false;
+}
#endif /* CONFIG_SCHED_PROXY_EXEC */
static void
@@ -3784,6 +3834,8 @@ static int ttwu_runnable(struct task_struct *p, int wake_flags)
update_rq_clock(rq);
if (p->se.sched_delayed)
enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
+ if (proxy_needs_return(rq, p))
+ goto out;
if (!task_on_cpu(rq, p)) {
/*
* When on_rq && !on_cpu the task is preempted, see if
@@ -3794,6 +3846,7 @@ static int ttwu_runnable(struct task_struct *p, int wake_flags)
ttwu_do_wakeup(p);
ret = 1;
}
+out:
__task_rq_unlock(rq, &rf);
return ret;
@@ -3924,6 +3977,14 @@ static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
return false;
#endif
+ /*
+ * If we're PROXY_WAKING, we deactivated the task on this cpu, so we
+ * should activate it from here as well, to avoid IPI'ing a cpu that
+ * is stuck in task_rq_lock() spinning on p->on_rq, deadlocking it.
+ */
+ if (task_on_rq_migrating(p))
+ return false;
+
/*
* Do not complicate things with the async wake_list while the CPU is
* in hotplug state.
@@ -4181,6 +4242,8 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
* it disabling IRQs (this allows not taking ->pi_lock).
*/
WARN_ON_ONCE(p->se.sched_delayed);
+ /* If p is current, we know we can run here, so clear blocked_on */
+ clear_task_blocked_on(p, NULL);
if (!ttwu_state_match(p, state, &success))
goto out;
@@ -4197,8 +4260,15 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
*/
scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
smp_mb__after_spinlock();
- if (!ttwu_state_match(p, state, &success))
- break;
+ if (!ttwu_state_match(p, state, &success)) {
+ /*
+ * If we're already TASK_RUNNING and PROXY_WAKING,
+ * continue on to the ttwu_runnable() check to force
+ * proxy_needs_return() evaluation.
+ */
+ if (!proxy_task_runnable_but_waking(p))
+ break;
+ }
trace_sched_waking(p);
--
2.51.1.930.gacf6e81ea2-goog