Re: [RFC PATCH v5 22/29] sched/rt: Add rt-cgroup migration functions
From: Peter Zijlstra
Date: Tue May 05 2026 - 11:21:08 EST
On Thu, Apr 30, 2026 at 11:38:26PM +0200, Yuri Andriaccio wrote:
> +static int group_find_lowest_rt_rq(struct task_struct *task, struct rt_rq *task_rt_rq)
> +{
> + struct sched_domain *sd;
> + struct cpumask lowest_mask;
> + struct sched_dl_entity *dl_se;
> + struct rt_rq *rt_rq;
> + int prio, lowest_prio;
> + int cpu, this_cpu = smp_processor_id();
> +
> + if (task->nr_cpus_allowed == 1)
> + return -1; /* No other targets possible */
> +
> + lowest_prio = task->prio - 1;
> + cpumask_clear(&lowest_mask);
> + for_each_cpu_and(cpu, cpu_online_mask, task->cpus_ptr) {
> + dl_se = task_rt_rq->tg->dl_se[cpu];
> + rt_rq = &dl_se->my_q->rt;
> + prio = rt_rq->highest_prio.curr;
> +
> + /*
> + * If we're on asym system ensure we consider the different capacities
> + * of the CPUs when searching for the lowest_mask.
> + */
> + if (dl_se->dl_throttled || !rt_task_fits_capacity(task, cpu))
> + continue;
> +
> + if (prio >= lowest_prio) {
> + if (prio > lowest_prio) {
> + cpumask_clear(&lowest_mask);
> + lowest_prio = prio;
> + }
> +
> + cpumask_set_cpu(cpu, &lowest_mask);
> + }
> + }
> +
> + if (cpumask_empty(&lowest_mask))
> + return -1;
> +
> + /*
> + * At this point we have built a mask of CPUs representing the
> + * lowest priority tasks in the system. Now we want to elect
> + * the best one based on our affinity and topology.
> + *
> + * We prioritize the last CPU that the task executed on since
> + * it is most likely cache-hot in that location.
> + */
> + cpu = task_cpu(task);
> + if (cpumask_test_cpu(cpu, &lowest_mask))
> + return cpu;
> +
> + /*
> + * Otherwise, we consult the sched_domains span maps to figure
> + * out which CPU is logically closest to our hot cache data.
> + */
> + if (!cpumask_test_cpu(this_cpu, &lowest_mask))
> + this_cpu = -1; /* Skip this_cpu opt if not among lowest */
> +
> + scoped_guard(rcu) {
> + for_each_domain(cpu, sd) {
> + if (sd->flags & SD_WAKE_AFFINE) {
> + int best_cpu;
> +
> + /*
> + * "this_cpu" is cheaper to preempt than a
> + * remote processor.
> + */
> + if (this_cpu != -1 &&
> + cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
> + return this_cpu;
> +
> + best_cpu = cpumask_any_and_distribute(&lowest_mask,
> + sched_domain_span(sd));
> + if (best_cpu < nr_cpu_ids)
> + return best_cpu;
> + }
> + }
> + }
I appreciate you trying to save on indent, but this does violate
coding style; please indent as normal.
> +
> + /*
> + * And finally, if there were no matches within the domains
> + * just give the caller *something* to work with from the compatible
> + * locations.
> + */
> + if (this_cpu != -1)
> + return this_cpu;
> +
> + cpu = cpumask_any_distribute(&lowest_mask);
> + if (cpu < nr_cpu_ids)
> + return cpu;
> +
> + return -1;
> +}