[PATCH v2 03/11] sched_ext: Add an event, SELECT_CPU_FALLBACK
From: Changwoo Min
Date: Sun Jan 26 2025 - 05:16:49 EST
Add a core event, SELECT_CPU_FALLBACK, which counts how many times
ops.select_cpu() returns a CPU that the task can't use, forcing the core
scheduler code to silently pick a fallback CPU.

__scx_add_event() is used since the caller holds p->pi_lock, so
preemption is already disabled.
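
For reference, the accounting helpers introduced earlier in this series
look roughly as follows (a sketch; the exact bodies live in
kernel/sched/ext.c). The distinction is that this_cpu_add() is safe with
preemption enabled, while __this_cpu_add() requires the caller to have
already disabled preemption:

	static DEFINE_PER_CPU(struct scx_event_stats, event_stats_cpu);

	/* Usable with preemption enabled; this_cpu_add() is preempt-safe. */
	#define scx_add_event(name, cnt) do {				\
		this_cpu_add(event_stats_cpu.name, cnt);		\
	} while (0)

	/* The caller must have preemption disabled. */
	#define __scx_add_event(name, cnt) do {				\
		__this_cpu_add(event_stats_cpu.name, cnt);		\
	} while (0)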
Signed-off-by: Changwoo Min <changwoo@xxxxxxxxxx>
---
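Note: is_cpu_allowed() is the core scheduler helper from
kernel/sched/core.c. Simplified, it amounts to the sketch below; the real
helper additionally handles migration-disabled tasks, kthreads, and CPU
hotplug states:

	/* Simplified sketch of is_cpu_allowed(); not the full implementation. */
	static bool is_cpu_allowed(struct task_struct *p, int cpu)
	{
		/* The CPU must be in the task's allowed cpumask ... */
		if (!cpumask_test_cpu(cpu, p->cpus_ptr))
			return false;
		/* ... and able to run tasks right now. */
		return cpu_active(cpu);
	}
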
kernel/sched/ext.c | 11 +++++++++++
1 file changed, 11 insertions(+)
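
As a usage sketch (not part of this patch), a BPF scheduler can read the
aggregated counter through the scx_bpf_event_stats() kfunc extended
below; the struct layout and kfunc are assumed from the rest of this
series:

	struct scx_event_stats events;

	scx_bpf_event_stats(&events, sizeof(events));
	bpf_printk("SELECT_CPU_FALLBACK: %llu", events.SELECT_CPU_FALLBACK);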
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index dbf659d593a3..727b1f8b623e 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1462,6 +1462,11 @@ static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
* Collection of event counters. Event types are placed in descending order.
*/
struct scx_event_stats {
+ /*
+ * If ops.select_cpu() returns a CPU which can't be used by the task,
+ * the core scheduler code silently picks a fallback CPU.
+ */
+ u64 SELECT_CPU_FALLBACK;
};
/*
@@ -3663,6 +3668,10 @@ static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flag
cpu = SCX_CALL_OP_TASK_RET(SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU,
select_cpu, p, prev_cpu, wake_flags);
*ddsp_taskp = NULL;
+
+ if (unlikely(!is_cpu_allowed(p, cpu)))
+ __scx_add_event(SELECT_CPU_FALLBACK, 1);
+
if (ops_cpu_valid(cpu, "from ops.select_cpu()"))
return cpu;
else
@@ -5482,6 +5491,7 @@ static void scx_dump_state(struct scx_exit_info *ei, size_t dump_len)
dump_line(&s, "--------------");
scx_bpf_event_stats(&events, sizeof(events));
+ scx_dump_event(s, &events, SELECT_CPU_FALLBACK);
if (seq_buf_has_overflowed(&s) && dump_len >= sizeof(trunc_marker))
memcpy(ei->dump + dump_len - sizeof(trunc_marker),
@@ -7810,6 +7820,7 @@ __bpf_kfunc void scx_bpf_event_stats(struct scx_event_stats *events,
memset(&e_sys, 0, sizeof(e_sys));
for_each_possible_cpu(cpu) {
e_cpu = per_cpu_ptr(&event_stats_cpu, cpu);
+ scx_agg_event(&e_sys, e_cpu, SELECT_CPU_FALLBACK);
}
/*
--
2.48.1