[PATCH 2/4] sched_ext: Add rq parameter to dispatch_enqueue()

From: Andrea Righi

Date: Sun Feb 15 2026 - 14:20:02 EST


Add an rq parameter to dispatch_enqueue() and update all callers to pass
the rq they are operating on.

This prepares for a later commit fixing the ops.dequeue() semantics.
No functional change intended.

Signed-off-by: Andrea Righi <arighi@xxxxxxxxxx>
---
kernel/sched/ext.c | 25 +++++++++++++------------
1 file changed, 13 insertions(+), 12 deletions(-)
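
For reviewers, the resulting prototype as it reads after this patch:

	static void dispatch_enqueue(struct scx_sched *sch, struct rq *rq,
				     struct scx_dispatch_q *dsq, struct task_struct *p,
				     u64 enq_flags);

Each caller passes the rq it is already operating on, e.g. do_enqueue_task()
passes its own rq, move_task_between_dsqs() passes dst_rq, and bypass_lb_cpu()
passes donee_rq.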

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 3885dd06dd573..9820026b74557 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1010,8 +1010,9 @@ static void local_dsq_post_enq(struct scx_dispatch_q *dsq, struct task_struct *p
 		resched_curr(rq);
 }
 
-static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
-			     struct task_struct *p, u64 enq_flags)
+static void dispatch_enqueue(struct scx_sched *sch, struct rq *rq,
+			     struct scx_dispatch_q *dsq, struct task_struct *p,
+			     u64 enq_flags)
 {
 	bool is_local = dsq->id == SCX_DSQ_LOCAL;
 
@@ -1325,7 +1326,7 @@ static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
 		return;
 	}
 
-	dispatch_enqueue(sch, dsq, p,
+	dispatch_enqueue(sch, rq, dsq, p,
 			 p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
 }
 
@@ -1415,7 +1416,7 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
 	direct_dispatch(sch, p, enq_flags);
 	return;
 local_norefill:
-	dispatch_enqueue(sch, &rq->scx.local_dsq, p, enq_flags);
+	dispatch_enqueue(sch, rq, &rq->scx.local_dsq, p, enq_flags);
 	return;
 local:
 	dsq = &rq->scx.local_dsq;
@@ -1435,7 +1436,7 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
 	 */
 	touch_core_sched(rq, p);
 	refill_task_slice_dfl(sch, p);
-	dispatch_enqueue(sch, dsq, p, enq_flags);
+	dispatch_enqueue(sch, rq, dsq, p, enq_flags);
 }
 
 static bool task_runnable(const struct task_struct *p)
@@ -1888,7 +1889,7 @@ static struct rq *move_task_between_dsqs(struct scx_sched *sch,
 		dispatch_dequeue_locked(p, src_dsq);
 		raw_spin_unlock(&src_dsq->lock);
 
-		dispatch_enqueue(sch, dst_dsq, p, enq_flags);
+		dispatch_enqueue(sch, dst_rq, dst_dsq, p, enq_flags);
 	}
 
 	return dst_rq;
@@ -1978,14 +1979,14 @@ static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
 	 * If dispatching to @rq that @p is already on, no lock dancing needed.
 	 */
 	if (rq == src_rq && rq == dst_rq) {
-		dispatch_enqueue(sch, dst_dsq, p,
+		dispatch_enqueue(sch, rq, dst_dsq, p,
 				 enq_flags | SCX_ENQ_CLEAR_OPSS);
 		return;
 	}
 
 	if (src_rq != dst_rq &&
 	    unlikely(!task_can_run_on_remote_rq(sch, p, dst_rq, true))) {
-		dispatch_enqueue(sch, find_global_dsq(sch, p), p,
+		dispatch_enqueue(sch, rq, find_global_dsq(sch, p), p,
 				 enq_flags | SCX_ENQ_CLEAR_OPSS);
 		return;
 	}
@@ -2023,7 +2024,7 @@ static void dispatch_to_local_dsq(struct scx_sched *sch, struct rq *rq,
 	 */
 	if (src_rq == dst_rq) {
 		p->scx.holding_cpu = -1;
-		dispatch_enqueue(sch, &dst_rq->scx.local_dsq, p,
+		dispatch_enqueue(sch, dst_rq, &dst_rq->scx.local_dsq, p,
 				 enq_flags);
 	} else {
 		move_remote_task_to_local_dsq(p, enq_flags,
@@ -2122,7 +2123,7 @@ static void finish_dispatch(struct scx_sched *sch, struct rq *rq,
 	if (dsq->id == SCX_DSQ_LOCAL)
 		dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
 	else
-		dispatch_enqueue(sch, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
+		dispatch_enqueue(sch, rq, dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS);
 }
 
 static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)
@@ -2423,7 +2424,7 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
 	 * DSQ.
	 */
 	if (p->scx.slice && !scx_rq_bypassing(rq)) {
-		dispatch_enqueue(sch, &rq->scx.local_dsq, p,
+		dispatch_enqueue(sch, rq, &rq->scx.local_dsq, p,
 				 SCX_ENQ_HEAD);
 		goto switch_class;
 	}
@@ -3954,7 +3955,7 @@ static u32 bypass_lb_cpu(struct scx_sched *sch, struct rq *rq,
 	 * between bypass DSQs.
 	 */
 	dispatch_dequeue_locked(p, donor_dsq);
-	dispatch_enqueue(sch, donee_dsq, p, SCX_ENQ_NESTED);
+	dispatch_enqueue(sch, donee_rq, donee_dsq, p, SCX_ENQ_NESTED);
 
 	/*
 	 * $donee might have been idle and need to be woken up. No need
--
2.53.0