[PATCH 1/4] sched/core: add helpers for iowait handling
From: Jens Axboe
Date: Tue Apr 16 2024 - 08:16:07 EST
Add helpers to inc/dec the runqueue iowait count based on the task, and
use them in the spots where the count is manipulated.
Add an rq_iowait() helper to abstract out how the per-rq iowait count is read.
No functional changes in this patch, just in preparation for switching
the type of 'nr_iowait'.
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
kernel/sched/core.c | 23 +++++++++++++++++++----
kernel/sched/cputime.c | 3 +--
kernel/sched/sched.h | 2 ++
3 files changed, 22 insertions(+), 6 deletions(-)
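(Not part of the patch; illustration only.) A minimal, self-contained C sketch
of the accessor pattern this patch introduces, using a toy 'rq_demo' struct
purely as an assumption for the example: call sites only go through inc/dec/read
helpers, so a later change to the underlying counter type stays confined to the
helpers themselves.

/*
 * Standalone illustration (not kernel code) of the accessor pattern:
 * wrap the counter behind inc/dec/read helpers so its representation
 * can change without touching any caller.
 */
#include <stdatomic.h>
#include <stdio.h>

struct rq_demo {
	atomic_int nr_iowait;	/* today an atomic; the type may change later */
};

static void iowait_inc(struct rq_demo *rq)
{
	atomic_fetch_add(&rq->nr_iowait, 1);
}

static void iowait_dec(struct rq_demo *rq)
{
	atomic_fetch_sub(&rq->nr_iowait, 1);
}

static int iowait_read(struct rq_demo *rq)
{
	return atomic_load(&rq->nr_iowait);
}

int main(void)
{
	struct rq_demo rq = { .nr_iowait = 0 };

	iowait_inc(&rq);
	printf("nr_iowait = %d\n", iowait_read(&rq));	/* prints 1 */
	iowait_dec(&rq);
	return 0;
}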
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7019a40457a6..977bb08a33d2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3721,6 +3721,21 @@ static inline cpumask_t *alloc_user_cpus_ptr(int node)
#endif /* !CONFIG_SMP */
+static void task_iowait_inc(struct task_struct *p)
+{
+ atomic_inc(&task_rq(p)->nr_iowait);
+}
+
+static void task_iowait_dec(struct task_struct *p)
+{
+ atomic_dec(&task_rq(p)->nr_iowait);
+}
+
+int rq_iowait(struct rq *rq)
+{
+ return atomic_read(&rq->nr_iowait);
+}
+
static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
@@ -3787,7 +3802,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
#endif
if (p->in_iowait) {
delayacct_blkio_end(p);
- atomic_dec(&task_rq(p)->nr_iowait);
+ task_iowait_dec(p);
}
activate_task(rq, p, en_flags);
@@ -4364,7 +4379,7 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
if (task_cpu(p) != cpu) {
if (p->in_iowait) {
delayacct_blkio_end(p);
- atomic_dec(&task_rq(p)->nr_iowait);
+ task_iowait_dec(p);
}
wake_flags |= WF_MIGRATED;
@@ -5472,7 +5487,7 @@ unsigned long long nr_context_switches(void)
unsigned int nr_iowait_cpu(int cpu)
{
- return atomic_read(&cpu_rq(cpu)->nr_iowait);
+ return rq_iowait(cpu_rq(cpu));
}
/*
@@ -6692,7 +6707,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
if (prev->in_iowait) {
- atomic_inc(&rq->nr_iowait);
+ task_iowait_inc(prev);
delayacct_blkio_start();
}
}
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index af7952f12e6c..7d9423df7779 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -222,9 +222,8 @@ void account_steal_time(u64 cputime)
void account_idle_time(u64 cputime)
{
u64 *cpustat = kcpustat_this_cpu->cpustat;
- struct rq *rq = this_rq();
- if (atomic_read(&rq->nr_iowait) > 0)
+ if (rq_iowait(this_rq()) > 0)
cpustat[CPUTIME_IOWAIT] += cputime;
else
cpustat[CPUTIME_IDLE] += cputime;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d2242679239e..387f67ddf18a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3473,4 +3473,6 @@ static inline void init_sched_mm_cid(struct task_struct *t) { }
extern u64 avg_vruntime(struct cfs_rq *cfs_rq);
extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se);
+int rq_iowait(struct rq *rq);
+
#endif /* _KERNEL_SCHED_SCHED_H */
--
2.43.0