On 26/05/2024 23:52, Dietmar Eggemann wrote:
[...]
>> +	old = READ_ONCE(p->se.avg.util_avg_bias);
>> +	new = (int)clamp(util, uclamp_min, uclamp_max) - (int)util;
>> +
>> +	WRITE_ONCE(p->se.avg.util_avg_bias, new);
>> +	if (!p->se.on_rq)
>> +		return;
>> +	WRITE_ONCE(avg->util_avg_bias, READ_ONCE(avg->util_avg_bias) + new - old);
>> +}
>> +#else /* !CONFIG_UCLAMP_TASK */
>> +static void update_util_bias(struct sched_avg *avg, struct task_struct *p)
>> +{
>> +}
>> +#endif
>> +
>>  /*
>>   * sched_entity:
>>   *
>> @@ -296,6 +330,8 @@ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
>>  {
>>  	if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
>>  		___update_load_avg(&se->avg, se_weight(se));
>> +		if (entity_is_task(se))
>> +			update_util_bias(NULL, task_of(se));
> IMHO,
>
> update_util_bias(struct sched_avg *avg, struct sched_entity *se)
>
> 	if (!entity_is_task(se))
> 		return;
> 	...
>
> would be easier to read.
Yeah, that would work, and it might be a good way to prepare for group clamping if that ever becomes a thing.
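
Something like the below is what I'd end up with (untested sketch on top of this patch; I'm assuming the snipped top of the helper keeps reading util_avg and the effective uclamp values via uclamp_eff_value(), as the current version does):

static void update_util_bias(struct sched_avg *avg, struct sched_entity *se)
{
	unsigned long util, uclamp_min, uclamp_max;
	struct task_struct *p;
	int old, new;

	/* Only tasks carry a bias until group clamping exists. */
	if (!entity_is_task(se))
		return;

	p = task_of(se);
	util = READ_ONCE(se->avg.util_avg);
	uclamp_min = uclamp_eff_value(p, UCLAMP_MIN);
	uclamp_max = uclamp_eff_value(p, UCLAMP_MAX);

	old = READ_ONCE(se->avg.util_avg_bias);
	new = (int)clamp(util, uclamp_min, uclamp_max) - (int)util;

	WRITE_ONCE(se->avg.util_avg_bias, new);
	/*
	 * Not on a rq (e.g. the blocked path, where avg is NULL):
	 * there is no rq-level bias to adjust.
	 */
	if (!se->on_rq)
		return;
	WRITE_ONCE(avg->util_avg_bias, READ_ONCE(avg->util_avg_bias) + new - old);
}

The call site in __update_load_avg_blocked_se() then becomes a plain

	update_util_bias(NULL, se);

with the entity_is_task() check moved out of the call site.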
[...]