[PATCH 3/7] sched/uclamp: Add util_est_uclamp

From: Hongyan Xia
Date: Mon Jun 24 2024 - 06:25:04 EST


The new util_est_uclamp is essentially clamp(util_est, uclamp_min,
uclamp_max) and follows how util_est operates: the per-task value is
refreshed in util_est_update() by clamping the updated estimate with
the task's effective uclamp values, and the cfs_rq-level value is the
sum of the contributions of the enqueued tasks, maintained in
util_est_enqueue() and util_est_dequeue(). task_util_est_uclamp()
mirrors task_util_est() and returns the max of task_util_uclamp() and
the per-task estimate.
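
In effect, the relations maintained for the new field are (an
illustrative sketch only, not the hunks themselves; it ignores the
UTIL_AVG_UNCHANGED flag bit that util_est carries):

	/* util_est_update(): p's estimate, clamped by its effective values */
	p->se.avg.util_est_uclamp = clamp(p->se.avg.util_est,
					  (unsigned int)uclamp_eff_value(p, UCLAMP_MIN),
					  (unsigned int)uclamp_eff_value(p, UCLAMP_MAX));

	/* util_est_enqueue()/util_est_dequeue(): the cfs_rq keeps the sum */
	cfs_rq->avg.util_est_uclamp += _task_util_est_uclamp(p); /* enqueue */
	cfs_rq->avg.util_est_uclamp -= _task_util_est_uclamp(p); /* dequeue, floored at 0 */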

Signed-off-by: Hongyan Xia <hongyan.xia2@xxxxxxx>
---
 include/linux/sched.h |  1 +
 kernel/sched/fair.c   | 30 ++++++++++++++++++++++++++++++
 2 files changed, 31 insertions(+)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 63bcb81b20bb..0160567314ae 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -479,6 +479,7 @@ struct sched_avg {
 	unsigned int			util_avg;
 	int				util_avg_bias;
 	unsigned int			util_est;
+	unsigned int			util_est_uclamp;
 } ____cacheline_aligned;
 
 /*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 23360c666829..0fa48466e02e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4859,6 +4859,16 @@ static inline unsigned long task_util_uclamp(struct task_struct *p)

 	return max(ret, 0L);
 }
+
+static inline unsigned long _task_util_est_uclamp(struct task_struct *p)
+{
+	return READ_ONCE(p->se.avg.util_est_uclamp);
+}
+
+static inline unsigned long task_util_est_uclamp(struct task_struct *p)
+{
+	return max(task_util_uclamp(p), _task_util_est_uclamp(p));
+}
 #else
 static inline long task_util_bias(struct task_struct *p)
 {
@@ -4869,6 +4879,16 @@ static inline unsigned long task_util_uclamp(struct task_struct *p)
 {
 	return task_util(p);
 }
+
+static inline unsigned long _task_util_est_uclamp(struct task_struct *p)
+{
+	return _task_util_est(p);
+}
+
+static inline unsigned long task_util_est_uclamp(struct task_struct *p)
+{
+	return task_util_est(p);
+}
 #endif
 
 static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
@@ -4883,6 +4903,9 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
 	enqueued  = cfs_rq->avg.util_est;
 	enqueued += _task_util_est(p);
 	WRITE_ONCE(cfs_rq->avg.util_est, enqueued);
+	enqueued  = cfs_rq->avg.util_est_uclamp;
+	enqueued += _task_util_est_uclamp(p);
+	WRITE_ONCE(cfs_rq->avg.util_est_uclamp, enqueued);
 
 	trace_sched_util_est_cfs_tp(cfs_rq);
 }
@@ -4899,6 +4922,9 @@ static inline void util_est_dequeue(struct cfs_rq *cfs_rq,
 	enqueued  = cfs_rq->avg.util_est;
 	enqueued -= min_t(unsigned int, enqueued, _task_util_est(p));
 	WRITE_ONCE(cfs_rq->avg.util_est, enqueued);
+	enqueued  = cfs_rq->avg.util_est_uclamp;
+	enqueued -= min_t(unsigned int, enqueued, _task_util_est_uclamp(p));
+	WRITE_ONCE(cfs_rq->avg.util_est_uclamp, enqueued);
 
 	trace_sched_util_est_cfs_tp(cfs_rq);
 }
@@ -4986,6 +5012,10 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
 	ewma  -= last_ewma_diff;
 	ewma >>= UTIL_EST_WEIGHT_SHIFT;
 done:
+	WRITE_ONCE(p->se.avg.util_est_uclamp,
+		   clamp(ewma,
+			 (unsigned int)uclamp_eff_value(p, UCLAMP_MIN),
+			 (unsigned int)uclamp_eff_value(p, UCLAMP_MAX)));
 	ewma |= UTIL_AVG_UNCHANGED;
 	WRITE_ONCE(p->se.avg.util_est, ewma);
 
--
2.34.1