On Wed, Mar 12, 2025 at 03:11:36PM -0700, John Stultz wrote:
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b4f7b14f62a24..3596244f613f8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6722,6 +6722,23 @@ find_proxy_task(struct rq *rq, struct task_struct *donor, struct rq_flags *rf)
}
#endif /* SCHED_PROXY_EXEC */
+static inline void proxy_tag_curr(struct rq *rq, struct task_struct *owner)
+{
+ if (!sched_proxy_exec())
+ return;
+ /*
+ * pick_next_task() calls set_next_task() on the chosen task
+ * at some point, which ensures it is not push/pullable.
+ * However, the chosen/donor task *and* the mutex owner form an
+ * atomic pair wrt push/pull.
+ *
+ * Make sure the owner we run is not pushable. Unfortunately we can
+ * only deal with that by means of a dequeue/enqueue cycle. :-/
+ */
+ dequeue_task(rq, owner, DEQUEUE_NOCLOCK | DEQUEUE_SAVE);
+ enqueue_task(rq, owner, ENQUEUE_NOCLOCK | ENQUEUE_RESTORE);
+}
So this is probably fine at this point, but we should eventually look at
fixing this.
We can probably look at (ab)using sched_class::set_cpus_allowed() for
this.