Re: [PATCH] RFC: sched: Rework task_sched_runtime to avoid calling update_rq_clock

From: Peter Zijlstra
Date: Fri Jun 14 2024 - 05:56:13 EST


On Thu, Jun 13, 2024 at 12:51:42PM +0100, Qais Yousef wrote:
> On 06/13/24 12:04, Peter Zijlstra wrote:
>
> > ---
> > diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> > index 0935f9d4bb7b..d4b87539d72a 100644
> > --- a/kernel/sched/core.c
> > +++ b/kernel/sched/core.c
> > @@ -724,7 +724,6 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
> >
> > rq->prev_irq_time += irq_delta;
> > delta -= irq_delta;
> > - psi_account_irqtime(rq->curr, irq_delta);
> > delayacct_irq(rq->curr, irq_delta);
> > #endif
> > #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
> > @@ -5459,6 +5458,8 @@ void sched_tick(void)
> >
> > sched_clock_tick();
> >
> > + psi_account_irqtime(curr, &rq->psi_irq_time);
> > +
>
> If wakeup preemption causes a context switch, wouldn't we lose this
> information then? I *think* active migration might cause this information to be
> lost too.

I'm not sure what would be lost?! The accounting is per CPU, not per
task, afaict. That said,...

> pick_next_task() might be a better place to do the accounting?

Additionally, when there has been an effective cgroup switch. Accounting
only on switch doesn't work for long running tasks either; the PSI
information will then be arbitrarily long out of date.

Which then gets me something like the (completely untested) below..

Hmm?
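(To make the scheme below a bit more concrete, here is a stand-alone
sketch of the delta-based accounting it does -- plain C, not kernel code,
all names are made up for illustration: keep the cumulative per-CPU irq
time, remember the value at the last accounting point, and only charge
the non-negative difference since then.)

/* Stand-alone illustration of delta-based irq-time accounting
 * (hypothetical names, not the kernel implementation). */
#include <stdint.h>
#include <stdio.h>

struct cpu_state {
	uint64_t irq_time;	/* cumulative irq time, monotonically increasing */
	uint64_t psi_irq_time;	/* snapshot taken at the last accounting point   */
};

/* Charge only the irq time accrued since the previous call. */
static void account_irqtime(struct cpu_state *cs)
{
	int64_t delta = (int64_t)(cs->irq_time - cs->psi_irq_time);

	if (delta < 0)		/* guard against a stale/backwards snapshot */
		return;

	cs->psi_irq_time = cs->irq_time;	/* advance the snapshot */
	printf("charge %lld ns of irq time to the current psi group\n",
	       (long long)delta);
}

int main(void)
{
	struct cpu_state cs = { 0, 0 };

	cs.irq_time += 1500;	/* irq handlers ran for 1.5us */
	account_irqtime(&cs);	/* charges 1500 */
	account_irqtime(&cs);	/* charges 0: nothing new accrued */
	return 0;
}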

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0935f9d4bb7b..36aed99d6a6c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -724,7 +724,6 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)

rq->prev_irq_time += irq_delta;
delta -= irq_delta;
- psi_account_irqtime(rq->curr, irq_delta);
delayacct_irq(rq->curr, irq_delta);
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
@@ -5459,6 +5458,8 @@ void sched_tick(void)

sched_clock_tick();

+ psi_account_irqtime(curr, NULL, &rq->psi_irq_time);
+
rq_lock(rq, &rf);

update_rq_clock(rq);
@@ -6521,6 +6524,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
++*switch_count;

migrate_disable_switch(rq, prev);
+ psi_account_irqtime(prev, next, &rq->psi_irq_time);
psi_sched_switch(prev, next, !task_on_rq_queued(prev));

trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 146baa91d104..65bba162408f 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -991,22 +991,31 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-void psi_account_irqtime(struct task_struct *task, u32 delta)
+void psi_account_irqtime(struct task_struct *curr, struct task_struct *prev, u64 *time)
{
- int cpu = task_cpu(task);
+ int cpu = task_cpu(curr);
struct psi_group *group;
struct psi_group_cpu *groupc;
- u64 now;
+ u64 now, irq;
+ s64 delta;

if (static_branch_likely(&psi_disabled))
return;

- if (!task->pid)
+ if (!curr->pid)
+ return;
+
+ group = task_psi_group(curr);
+ if (prev && task_psi_group(prev) == group)
return;

now = cpu_clock(cpu);
+ irq = irq_time_read(cpu);
+ delta = (s64)(irq - *time);
+ if (delta < 0)
+ return;
+ *time = irq;

- group = task_psi_group(task);
do {
if (!group->enabled)
continue;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 62fd8bc6fd08..a63eb546bc4a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1133,6 +1133,7 @@ struct rq {

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
u64 prev_irq_time;
+ u64 psi_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
u64 prev_steal_time;
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index d1445410840a..1e290054c5db 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -110,7 +110,7 @@ __schedstats_from_se(struct sched_entity *se)
void psi_task_change(struct task_struct *task, int clear, int set);
void psi_task_switch(struct task_struct *prev, struct task_struct *next,
bool sleep);
-void psi_account_irqtime(struct task_struct *task, u32 delta);
+void psi_account_irqtime(struct task_struct *curr, struct task_struct *prev, u64 *time);

/*
* PSI tracks state that persists across sleeps, such as iowaits and
@@ -192,7 +192,7 @@ static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
struct task_struct *next,
bool sleep) {}
-static inline void psi_account_irqtime(struct task_struct *task, u32 delta) {}
+static inline void psi_account_irqtime(struct task_struct *curr, struct task_struct *prev, u64 *time) {}
#endif /* CONFIG_PSI */

#ifdef CONFIG_SCHED_INFO