Re: [PATCH 05/12] sched: Move sched_class::prio_changed() into the change pattern
From: Pierre Gondois
Date: Mon Jan 12 2026 - 15:45:30 EST
Hello Peter,
It seems this patch:
6455ad5346c9 ("sched: Move sched_class::prio_changed() into the change pattern")
is triggering the following warning:
rq_pin_lock()
\-WARN_ON_ONCE(rq->balance_callback && rq->balance_callback != &balance_push_callback);
On an arm64 Juno, it can be reproduced by creating and killing a
deadline task:
chrt -d -T 1000000 -P 1000000 0 yes > /dev/null
[ 49.518832] Hardware name: ARM LTD ARM Juno Development Platform/ARM Juno Development Platform, BIOS EDK II Jul 11 2025
[ 49.518838] Call trace:
[ 49.518842] show_stack (arch/arm64/kernel/stacktrace.c:501) (C)
[ 49.518864] dump_stack_lvl (lib/dump_stack.c:122)
[ 49.518878] dump_stack (lib/dump_stack.c:130)
[ 49.518889] prio_changed_dl (kernel/sched/deadline.c:0 kernel/sched/deadline.c:3343)
[ 49.518903] sched_change_end (kernel/sched/core.c:0)
[ 49.518916] sched_move_task (kernel/sched/core.c:9167)
[ 49.518927] sched_autogroup_exit_task (kernel/sched/autogroup.c:157)
[ 49.518940] do_exit (kernel/exit.c:975)
[ 49.518950] do_group_exit (kernel/exit.c:0)
[ 49.518960] get_signal (kernel/signal.c:0)
[ 49.518970] arch_do_signal_or_restart (arch/arm64/kernel/signal.c:1619)
[ 49.518983] exit_to_user_mode_loop (kernel/entry/common.c:43 kernel/entry/common.c:75)
[ 49.518994] el0_svc (./include/linux/irq-entry-common.h:0 ./include/linux/irq-entry-common.h:242 arch/arm64/kernel/entry-common.c:81 arch/arm64/kernel/entry-common.c:725)
[ 49.519009] el0t_64_sync_handler (arch/arm64/kernel/entry-common.c:0)
[ 49.519023] el0t_64_sync (arch/arm64/kernel/entry.S:596)
[ 49.519119] ------------[ cut here ]------------
[ 49.519124] WARNING: kernel/sched/sched.h:1829 at __schedule+0x404/0xf78, CPU#1: yes/326
[ 49.612674] Modules linked in:
[ 49.615737] CPU: 1 UID: 0 PID: 326 Comm: yes Not tainted 6.19.0-rc4-next-20260109-g8be7ad74b7e4 #261 PREEMPT
[ 49.625670] Hardware name: ARM LTD ARM Juno Development Platform/ARM Juno Development Platform, BIOS EDK II Jul 11 2025
[ 49.636470] pstate: 800000c5 (Nzcv daIF -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[ 49.643443] pc : __schedule (kernel/sched/core.c:0 kernel/sched/sched.h:1907 kernel/sched/core.c:6798)
[ 49.647287] lr : __schedule (kernel/sched/sched.h:1827 kernel/sched/sched.h:1907 kernel/sched/core.c:6798)
[ 49.651130] sp : ffff800081d739e0
[ 49.654445] x29: ffff800081d73a40 x28: ffff000809548908 x27: ffffddc6d7c532e8
[ 49.661604] x26: ffff000809548000 x25: 00000000400004d8 x24: 0000000000000009
[ 49.668762] x23: 0000000000000001 x22: ffffddc6d7bf8500 x21: ffffddc6d5b9bdb0
[ 49.675919] x20: ffff00097681c500 x19: ffff000809548000 x18: ffff800081d735b8
[ 49.683076] x17: 0000000000000063 x16: 0000000000000000 x15: 0000000000000004
[ 49.690233] x14: ffff000809548aa0 x13: 000000000dc48bda x12: 000000002edb68e5
[ 49.697391] x11: 0000000000000000 x10: 0000000000000001 x9 : ffffddc6d7c7b388
[ 49.704548] x8 : ffff000976636420 x7 : ffffddc6d5b9ae64 x6 : 0000000000000000
[ 49.711704] x5 : 0000000000000001 x4 : 0000000000000001 x3 : 0000000000000000
[ 49.718861] x2 : 0000000000000008 x1 : ffff00097681c518 x0 : 0000000000008629
[ 49.726017] Call trace:
[ 49.728462] __schedule (kernel/sched/core.c:0 kernel/sched/sched.h:1907 kernel/sched/core.c:6798) (P)
[ 49.732308] preempt_schedule_common (./arch/arm64/include/asm/preempt.h:53 kernel/sched/core.c:7080)
[ 49.736762] preempt_schedule (kernel/sched/core.c:0)
[ 49.740606] _raw_spin_unlock_irqrestore (./include/linux/spinlock_api_smp.h:0 kernel/locking/spinlock.c:194)
[ 49.745410] sched_move_task (kernel/sched/sched.h:0)
[ 49.749341] sched_autogroup_exit_task (kernel/sched/autogroup.c:157)
[ 49.753969] do_exit (kernel/exit.c:975)
[ 49.757202] do_group_exit (kernel/exit.c:0)
[ 49.760782] get_signal (kernel/signal.c:0)
[ 49.764277] arch_do_signal_or_restart (arch/arm64/kernel/signal.c:1619)
[ 49.769078] exit_to_user_mode_loop (kernel/entry/common.c:43 kernel/entry/common.c:75)
[ 49.773530] el0_svc (./include/linux/irq-entry-common.h:0 ./include/linux/irq-entry-common.h:242 arch/arm64/kernel/entry-common.c:81 arch/arm64/kernel/entry-common.c:725)
[ 49.776767] el0t_64_sync_handler (arch/arm64/kernel/entry-common.c:0)
[ 49.781048] el0t_64_sync (arch/arm64/kernel/entry.S:596)
[ 49.784716] irq event stamp: 80194
[ 49.788118] hardirqs last enabled at (80193): irqentry_exit (kernel/entry/common.c:0)
[ 49.796575] hardirqs last disabled at (80194): __schedule (kernel/sched/core.c:6755)
[ 49.804858] softirqs last enabled at (77126): handle_softirqs (./arch/arm64/include/asm/preempt.h:12 kernel/softirq.c:469 kernel/softirq.c:654)
[ 49.813575] softirqs last disabled at (77121): __do_softirq (kernel/softirq.c:661)
[ 49.821856] ---[ end trace 0000000000000000 ]---
The first stack dump comes from this:
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 1f94994984038..4647fea76d748 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -632,11 +640,17 @@ static inline void deadline_queue_push_tasks(struct rq *rq)
if (!has_pushable_dl_tasks(rq))
return;
+ if (sysctl_sched_debug_local)
+ dump_stack();
+
queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}
static inline void deadline_queue_pull_task(struct rq *rq)
{
+ if (sysctl_sched_debug_local)
+ dump_stack();
+
queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}
On 10/6/25 12:44, Peter Zijlstra wrote:
Move sched_class::prio_changed() into the change pattern.

The cause might be the above. This used to call __balance_callbacks()
And while there, extend it with sched_class::get_prio() in order to
fix the deadline situation.
Suggested-by: Tejun Heo <tj@xxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Acked-by: Tejun Heo <tj@xxxxxxxxxx>
---
kernel/sched/core.c | 24 +++++++++++++-----------
kernel/sched/deadline.c | 20 +++++++++++---------
kernel/sched/ext.c | 8 +-------
kernel/sched/fair.c | 8 ++++++--
kernel/sched/idle.c | 5 ++++-
kernel/sched/rt.c | 5 ++++-
kernel/sched/sched.h | 7 ++++---
kernel/sched/stop_task.c | 5 ++++-
kernel/sched/syscalls.c | 9 ---------
9 files changed, 47 insertions(+), 44 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2169,12 +2169,6 @@ inline int task_curr(const struct task_s
return cpu_curr(task_cpu(p)) == p;
}
-void check_prio_changed(struct rq *rq, struct task_struct *p, int oldprio)
-{
- if (oldprio != p->prio || dl_task(p))
- p->sched_class->prio_changed(rq, p, oldprio);
-}
-
void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
{
struct task_struct *donor = rq->donor;
@@ -7402,9 +7396,6 @@ void rt_mutex_setprio(struct task_struct
p->sched_class = next_class;
p->prio = prio;
}
-
- if (!(queue_flag & DEQUEUE_CLASS))
- check_prio_changed(rq, p, oldprio);
out_unlock:
/* Avoid rq from going away on us: */
preempt_disable();
while holding the rq lock.
@@ -10860,6 +10851,13 @@ struct sched_change_ctx *sched_change_begin

Now this is not the case anymore it seems. prio_changed_dl() sets the
.running = task_current(rq, p),
};
+ if (!(flags & DEQUEUE_CLASS)) {
+ if (p->sched_class->get_prio)
+ ctx->prio = p->sched_class->get_prio(rq, p);
+ else
+ ctx->prio = p->prio;
+ }
+
if (ctx->queued)
dequeue_task(rq, p, flags);
if (ctx->running)
@@ -10886,6 +10884,10 @@ void sched_change_end(struct sched_chang
if (ctx->running)
set_next_task(rq, p);
- if ((ctx->flags & ENQUEUE_CLASS) && p->sched_class->switched_to)
- p->sched_class->switched_to(rq, p);
+ if (ctx->flags & ENQUEUE_CLASS) {
+ if (p->sched_class->switched_to)
+ p->sched_class->switched_to(rq, p);
+ } else {
+ p->sched_class->prio_changed(rq, p, ctx->prio);
+ }
balance_callback and rq_pin_lock() is called with a non-NULL value.