Re: [PATCH 1/4] sched/topology: Store root domain CPU capacity sum

From: Vincent Guittot
Date: Wed Apr 08 2020 - 08:29:32 EST


On Wed, 8 Apr 2020 at 11:50, Dietmar Eggemann <dietmar.eggemann@xxxxxxx> wrote:
>
> Add the sum of the (original) CPU capacities of all member CPUs to the
> root domain.
>
> This is needed for capacity-aware SCHED_DEADLINE admission control.
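
For context, this is roughly the kind of check such a sum enables. A minimal
sketch only, built on the rd_capacity() helper introduced further down in this
patch; the function name dl_bw_fits_capacity() and the total_bw/max_bw
parameters are made up for illustration, this is not the admission-control
code from the later patches of the series:

	/*
	 * Illustrative sketch of a capacity-aware DEADLINE admission test:
	 * compare the total requested bandwidth against the root domain's
	 * capacity sum instead of against the plain CPU count.
	 *
	 * total_bw: sum of the admitted tasks' bandwidth, max_bw: per-CPU
	 * bandwidth limit, both in the DL class's fixed-point format
	 * (runtime/period scaled by 1 << BW_SHIFT). rd_capacity() is the
	 * helper added below by this patch.
	 */
	static inline bool dl_bw_fits_capacity(u64 total_bw, u64 max_bw, int cpu)
	{
		unsigned long cap = rd_capacity(cpu);

		/*
		 * Classic test: total_bw <= max_bw * nr_cpus.
		 * Capacity-aware: scale max_bw by cap / SCHED_CAPACITY_SCALE,
		 * so an asymmetric CPU only counts for what it can serve.
		 */
		return total_bw <= (max_bw * cap) >> SCHED_CAPACITY_SHIFT;
	}
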
>
> Signed-off-by: Dietmar Eggemann <dietmar.eggemann@xxxxxxx>
> ---
> kernel/sched/sched.h | 11 +++++++++++
> kernel/sched/topology.c | 14 ++++++++++----
> 2 files changed, 21 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 1e72d1b3d3ce..91bd0cb0c529 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -797,6 +797,7 @@ struct root_domain {
>  	cpumask_var_t		rto_mask;
>  	struct cpupri		cpupri;
>
> +	unsigned long		sum_cpu_capacity;
>  	unsigned long		max_cpu_capacity;
>
>  	/*
> @@ -2393,6 +2394,16 @@ static inline unsigned long capacity_orig_of(int cpu)
>  {
>  	return cpu_rq(cpu)->cpu_capacity_orig;
>  }
> +
> +static inline unsigned long rd_capacity(int cpu)
> +{
> +	return cpu_rq(cpu)->rd->sum_cpu_capacity;
> +}
> +#else
> +static inline unsigned long rd_capacity(int cpu)
> +{
> +	return SCHED_CAPACITY_SCALE;
> +}
>  #endif
>
>  /**
> diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
> index 8344757bba6e..74b0c0fa4b1b 100644
> --- a/kernel/sched/topology.c
> +++ b/kernel/sched/topology.c
> @@ -2052,12 +2052,17 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
>  	/* Attach the domains */
>  	rcu_read_lock();
>  	for_each_cpu(i, cpu_map) {
> +		unsigned long cap = arch_scale_cpu_capacity(i);

Why do you replace the use of rq->cpu_capacity_orig with
arch_scale_cpu_capacity(i)? There is nothing about this change in the
commit message.
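
For reference, rq->cpu_capacity_orig is itself only a cached copy of the arch
value: the load-balance path (update_cpu_capacity() in fair.c) writes it back
from exactly that callback, so the two are normally expected to agree.
Roughly (the helper below is hypothetical and paraphrases only the relevant
assignment, it is not fair.c as-is):

	static inline void refresh_cpu_capacity_orig(int cpu)
	{
		/* mirrors what update_cpu_capacity() does for this field */
		cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
	}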

> +
>  		rq = cpu_rq(i);
>  		sd = *per_cpu_ptr(d.sd, i);
>
>  		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
> -		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
> -			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
> +		if (cap > READ_ONCE(d.rd->max_cpu_capacity))
> +			WRITE_ONCE(d.rd->max_cpu_capacity, cap);
> +
> +		WRITE_ONCE(d.rd->sum_cpu_capacity,
> +			   READ_ONCE(d.rd->sum_cpu_capacity) + cap);
>
>  		cpu_attach_domain(sd, d.rd, i);
>  	}
> @@ -2067,8 +2072,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
>  		static_branch_inc_cpuslocked(&sched_asym_cpucapacity);
>
>  	if (rq && sched_debug_enabled) {
> -		pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
> -			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
> +		pr_info("root domain span: %*pbl (capacity = %lu max cpu_capacity = %lu)\n",
> +			cpumask_pr_args(cpu_map), rq->rd->sum_cpu_capacity,
> +			rq->rd->max_cpu_capacity);
>  	}
>
> ret = 0;
> --
> 2.17.1
>