[PATCH 1/2] sched_ext: Extract scx_dump_cpu() from scx_dump_state()
From: Changwoo Min
Date: Tue Apr 07 2026 - 23:12:02 EST
Factor the per-CPU state dump logic out of the for_each_possible_cpu()
loop in scx_dump_state() into a new scx_dump_cpu() helper. This shrinks
scx_dump_state() considerably and improves readability; the nested
seq_buf used for the speculative per-CPU dump, along with its backing
@buf pointer, now lives entirely in the helper.

No functional change intended.
Signed-off-by: Changwoo Min <changwoo@xxxxxxxxxx>
---
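Note for reviewers: scx_dump_cpu() keeps the speculative-commit dance
around the nested seq_buf unchanged, which is also why @buf moves from
scx_dump_state() into the helper. As a refresher, below is a minimal
sketch of that pattern (illustrative only, not part of this patch;
dump_speculative() and @interesting are made-up names) using the
seq_buf API from <linux/seq_buf.h>: render tentatively into a nested
seq_buf carved out of the parent's free space, then commit to the
parent only if the output turns out to be worth keeping.

  static void dump_speculative(struct seq_buf *s, bool interesting)
  {
  	struct seq_buf ns;
  	size_t avail;
  	char *buf;

  	/* @avail is 0 if @s has already overflowed. */
  	avail = seq_buf_get_buf(s, &buf);
  	seq_buf_init(&ns, buf, avail);

  	/* Render tentatively into the nested buffer. */
  	seq_buf_printf(&ns, "tentative output\n");

  	/* Nothing worth keeping: drop the nested output on the floor. */
  	if (!interesting)
  		return;

  	/*
  	 * Committing to an already-overflowed @s would trigger the
  	 * BUG_ON() in seq_buf_commit(), so guard on @avail and
  	 * propagate @ns's overflow state to @s instead.
  	 */
  	if (avail) {
  		seq_buf_commit(s, seq_buf_used(&ns));
  		if (seq_buf_has_overflowed(&ns))
  			seq_buf_set_overflow(s);
  	}
  }

This after-the-fact decision is what lets idle CPUs with no
ops.dump_cpu() output be skipped without wasting buffer space.
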
kernel/sched/ext.c | 173 +++++++++++++++++++++++----------------------
1 file changed, 90 insertions(+), 83 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index b757b853b42b..8f7d5c1556be 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -6190,6 +6190,95 @@ static void scx_dump_task(struct scx_sched *sch,
}
}
+static void scx_dump_cpu(struct scx_sched *sch, struct seq_buf *s,
+ struct scx_dump_ctx *dctx, int cpu,
+ bool dump_all_tasks)
+{
+ struct rq *rq = cpu_rq(cpu);
+ struct rq_flags rf;
+ struct task_struct *p;
+ struct seq_buf ns;
+ size_t avail, used;
+ char *buf;
+ bool idle;
+
+ rq_lock_irqsave(rq, &rf);
+
+ idle = list_empty(&rq->scx.runnable_list) &&
+ rq->curr->sched_class == &idle_sched_class;
+
+ if (idle && !SCX_HAS_OP(sch, dump_cpu))
+ goto next;
+
+ /*
+ * We don't yet know whether ops.dump_cpu() will produce output
+ * and we may want to skip the default CPU dump if it doesn't.
+ * Use a nested seq_buf to generate the standard dump so that we
+ * can decide whether to commit later.
+ */
+ avail = seq_buf_get_buf(s, &buf);
+ seq_buf_init(&ns, buf, avail);
+
+ dump_newline(&ns);
+ dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu ksync=%lu",
+ cpu, rq->scx.nr_running, rq->scx.flags,
+ rq->scx.cpu_released, rq->scx.ops_qseq,
+ rq->scx.kick_sync);
+ dump_line(&ns, " curr=%s[%d] class=%ps",
+ rq->curr->comm, rq->curr->pid,
+ rq->curr->sched_class);
+ if (!cpumask_empty(rq->scx.cpus_to_kick))
+ dump_line(&ns, " cpus_to_kick : %*pb",
+ cpumask_pr_args(rq->scx.cpus_to_kick));
+ if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
+ dump_line(&ns, " idle_to_kick : %*pb",
+ cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
+ if (!cpumask_empty(rq->scx.cpus_to_preempt))
+ dump_line(&ns, " cpus_to_preempt: %*pb",
+ cpumask_pr_args(rq->scx.cpus_to_preempt));
+ if (!cpumask_empty(rq->scx.cpus_to_wait))
+ dump_line(&ns, " cpus_to_wait : %*pb",
+ cpumask_pr_args(rq->scx.cpus_to_wait));
+ if (!cpumask_empty(rq->scx.cpus_to_sync))
+ dump_line(&ns, " cpus_to_sync : %*pb",
+ cpumask_pr_args(rq->scx.cpus_to_sync));
+
+ used = seq_buf_used(&ns);
+ if (SCX_HAS_OP(sch, dump_cpu)) {
+ ops_dump_init(&ns, " ");
+ SCX_CALL_OP(sch, SCX_KF_REST, dump_cpu, NULL,
+ dctx, cpu, idle);
+ ops_dump_exit();
+ }
+
+ /*
+ * If idle && nothing generated by ops.dump_cpu(), there's
+ * nothing interesting. Skip.
+ */
+ if (idle && used == seq_buf_used(&ns))
+ goto next;
+
+ /*
+ * $s may already have overflowed when $ns was created. If so,
+ * calling commit on it will trigger BUG.
+ */
+ if (avail) {
+ seq_buf_commit(s, seq_buf_used(&ns));
+ if (seq_buf_has_overflowed(&ns))
+ seq_buf_set_overflow(s);
+ }
+
+ if (rq->curr->sched_class == &ext_sched_class &&
+ (dump_all_tasks || scx_task_on_sched(sch, rq->curr)))
+ scx_dump_task(sch, s, dctx, rq->curr, '*');
+
+ list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
+ if (dump_all_tasks || scx_task_on_sched(sch, p))
+ scx_dump_task(sch, s, dctx, p, ' ');
+next:
+ rq_unlock_irqrestore(rq, &rf);
+}
+
/*
* Dump scheduler state. If @dump_all_tasks is true, dump all tasks regardless
* of which scheduler they belong to. If false, only dump tasks owned by @sch.
 */
@@ -6210,7 +6299,6 @@ static void scx_dump_state(struct scx_sched *sch, struct scx_exit_info *ei,
};
struct seq_buf s;
struct scx_event_stats events;
- char *buf;
int cpu;
guard(raw_spinlock_irqsave)(&scx_dump_lock);
@@ -6250,88 +6338,7 @@ static void scx_dump_state(struct scx_sched *sch, struct scx_exit_info *ei,
dump_line(&s, "----------");
for_each_possible_cpu(cpu) {
- struct rq *rq = cpu_rq(cpu);
- struct rq_flags rf;
- struct task_struct *p;
- struct seq_buf ns;
- size_t avail, used;
- bool idle;
-
- rq_lock_irqsave(rq, &rf);
-
- idle = list_empty(&rq->scx.runnable_list) &&
- rq->curr->sched_class == &idle_sched_class;
-
- if (idle && !SCX_HAS_OP(sch, dump_cpu))
- goto next;
-
- /*
- * We don't yet know whether ops.dump_cpu() will produce output
- * and we may want to skip the default CPU dump if it doesn't.
- * Use a nested seq_buf to generate the standard dump so that we
- * can decide whether to commit later.
- */
- avail = seq_buf_get_buf(&s, &buf);
- seq_buf_init(&ns, buf, avail);
-
- dump_newline(&ns);
- dump_line(&ns, "CPU %-4d: nr_run=%u flags=0x%x cpu_rel=%d ops_qseq=%lu ksync=%lu",
- cpu, rq->scx.nr_running, rq->scx.flags,
- rq->scx.cpu_released, rq->scx.ops_qseq,
- rq->scx.kick_sync);
- dump_line(&ns, " curr=%s[%d] class=%ps",
- rq->curr->comm, rq->curr->pid,
- rq->curr->sched_class);
- if (!cpumask_empty(rq->scx.cpus_to_kick))
- dump_line(&ns, " cpus_to_kick : %*pb",
- cpumask_pr_args(rq->scx.cpus_to_kick));
- if (!cpumask_empty(rq->scx.cpus_to_kick_if_idle))
- dump_line(&ns, " idle_to_kick : %*pb",
- cpumask_pr_args(rq->scx.cpus_to_kick_if_idle));
- if (!cpumask_empty(rq->scx.cpus_to_preempt))
- dump_line(&ns, " cpus_to_preempt: %*pb",
- cpumask_pr_args(rq->scx.cpus_to_preempt));
- if (!cpumask_empty(rq->scx.cpus_to_wait))
- dump_line(&ns, " cpus_to_wait : %*pb",
- cpumask_pr_args(rq->scx.cpus_to_wait));
- if (!cpumask_empty(rq->scx.cpus_to_sync))
- dump_line(&ns, " cpus_to_sync : %*pb",
- cpumask_pr_args(rq->scx.cpus_to_sync));
-
- used = seq_buf_used(&ns);
- if (SCX_HAS_OP(sch, dump_cpu)) {
- ops_dump_init(&ns, " ");
- SCX_CALL_OP(sch, SCX_KF_REST, dump_cpu, NULL,
- &dctx, cpu, idle);
- ops_dump_exit();
- }
-
- /*
- * If idle && nothing generated by ops.dump_cpu(), there's
- * nothing interesting. Skip.
- */
- if (idle && used == seq_buf_used(&ns))
- goto next;
-
- /*
- * $s may already have overflowed when $ns was created. If so,
- * calling commit on it will trigger BUG.
- */
- if (avail) {
- seq_buf_commit(&s, seq_buf_used(&ns));
- if (seq_buf_has_overflowed(&ns))
- seq_buf_set_overflow(&s);
- }
-
- if (rq->curr->sched_class == &ext_sched_class &&
- (dump_all_tasks || scx_task_on_sched(sch, rq->curr)))
- scx_dump_task(sch, &s, &dctx, rq->curr, '*');
-
- list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
- if (dump_all_tasks || scx_task_on_sched(sch, p))
- scx_dump_task(sch, &s, &dctx, p, ' ');
- next:
- rq_unlock_irqrestore(rq, &rf);
+ scx_dump_cpu(sch, &s, &dctx, cpu, dump_all_tasks);
}
dump_newline(&s);
--
2.53.0