[PATCH 07/15] sched_ext: Wrap deferred_reenq_local_node into a struct
From: Tejun Heo
Date: Fri Mar 06 2026 - 14:06:53 EST
Wrap the deferred_reenq_local_node list_head into struct
scx_deferred_reenq_local. More fields will be added later, and wrapping
them in a struct allows using a shorthand pointer to access them.
No functional change.
Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
---
kernel/sched/ext.c | 22 +++++++++++++---------
kernel/sched/ext_internal.h | 6 +++++-
2 files changed, 18 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index ffccaf04e34d..80d1e6ccc326 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -3648,15 +3648,19 @@ static void process_deferred_reenq_locals(struct rq *rq)
struct scx_sched *sch;
scoped_guard (raw_spinlock, &rq->scx.deferred_reenq_lock) {
- struct scx_sched_pcpu *sch_pcpu =
+ struct scx_deferred_reenq_local *drl =
list_first_entry_or_null(&rq->scx.deferred_reenq_locals,
- struct scx_sched_pcpu,
- deferred_reenq_local_node);
- if (!sch_pcpu)
+ struct scx_deferred_reenq_local,
+ node);
+ struct scx_sched_pcpu *sch_pcpu;
+
+ if (!drl)
return;
+ sch_pcpu = container_of(drl, struct scx_sched_pcpu,
+ deferred_reenq_local);
sch = sch_pcpu->sch;
- list_del_init(&sch_pcpu->deferred_reenq_local_node);
+ list_del_init(&drl->node);
}
reenq_local(sch, rq);
@@ -4200,7 +4204,7 @@ static void scx_sched_free_rcu_work(struct work_struct *work)
for_each_possible_cpu(cpu) {
struct scx_sched_pcpu *pcpu = per_cpu_ptr(sch->pcpu, cpu);
- WARN_ON_ONCE(!list_empty(&pcpu->deferred_reenq_local_node));
+ WARN_ON_ONCE(!list_empty(&pcpu->deferred_reenq_local.node));
}
free_percpu(sch->pcpu);
@@ -5813,7 +5817,7 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops,
struct scx_sched_pcpu *pcpu = per_cpu_ptr(sch->pcpu, cpu);
pcpu->sch = sch;
- INIT_LIST_HEAD(&pcpu->deferred_reenq_local_node);
+ INIT_LIST_HEAD(&pcpu->deferred_reenq_local.node);
}
sch->helper = kthread_run_worker(0, "sched_ext_helper");
@@ -8391,8 +8395,8 @@ __bpf_kfunc void scx_bpf_reenqueue_local___v2(const struct bpf_prog_aux *aux)
scoped_guard (raw_spinlock, &rq->scx.deferred_reenq_lock) {
struct scx_sched_pcpu *pcpu = this_cpu_ptr(sch->pcpu);
- if (list_empty(&pcpu->deferred_reenq_local_node))
- list_move_tail(&pcpu->deferred_reenq_local_node,
+ if (list_empty(&pcpu->deferred_reenq_local.node))
+ list_move_tail(&pcpu->deferred_reenq_local.node,
&rq->scx.deferred_reenq_locals);
}
diff --git a/kernel/sched/ext_internal.h b/kernel/sched/ext_internal.h
index 80d40a9c5ad9..1a8d61097cab 100644
--- a/kernel/sched/ext_internal.h
+++ b/kernel/sched/ext_internal.h
@@ -954,6 +954,10 @@ struct scx_dsp_ctx {
struct scx_dsp_buf_ent buf[];
};
+struct scx_deferred_reenq_local {
+ struct list_head node;
+};
+
struct scx_sched_pcpu {
struct scx_sched *sch;
u64 flags; /* protected by rq lock */
@@ -965,7 +969,7 @@ struct scx_sched_pcpu {
*/
struct scx_event_stats event_stats;
- struct list_head deferred_reenq_local_node;
+ struct scx_deferred_reenq_local deferred_reenq_local;
struct scx_dispatch_q bypass_dsq;
#ifdef CONFIG_EXT_SUB_SCHED
u32 bypass_host_seq;
--
2.53.0