Re: [PATCH v5 2/3] sched/topology: Rework CPU capacity asymmetry detection
From: Beata Michalska
Date: Tue May 25 2021 - 05:30:53 EST
On Tue, May 25, 2021 at 10:25:36AM +0200, Dietmar Eggemann wrote:
> On 24/05/2021 12:16, Beata Michalska wrote:
>
> [...]
>
> > Rework the way the capacity asymmetry levels are being detected,
> > allowing it to point to the lowest topology level (for a given CPU)
> > where the full set of available CPU capacities is visible to all CPUs
> > within a given domain. As a result, the per-cpu sd_asym_cpucapacity
> > might differ across the domains. This will have an impact on EAS
> > wake-up placement in a way that it might see a different rage of CPUs
> > to be considered, depending on
>
> s/rage/range ;-)
Right ..... :)
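To make the intended behaviour concrete, a hypothetical example (the
capacity values and CPU layout below are made up, not taken from the
patch):

	/*
	 * Made-up topology: CPUs 0-3 @ capacity 446, CPUs 4-5 @ 871,
	 * CPUs 6-7 @ 1024. After asym_cpu_capacity_scan() the
	 * asym_cap_list holds three entries:
	 *
	 *	{ .capacity = 1024, .cpu_mask = 6-7 }
	 *	{ .capacity =  871, .cpu_mask = 4-5 }
	 *	{ .capacity =  446, .cpu_mask = 0-3 }
	 *
	 * A domain spanning CPUs 0-7 intersects all three masks, so it
	 * is flagged SD_ASYM_CPUCAPACITY | SD_ASYM_CPUCAPACITY_FULL;
	 * a domain spanning only CPUs 4-7 intersects two of them, so
	 * it gets SD_ASYM_CPUCAPACITY alone. Per CPU,
	 * sd_asym_cpucapacity then points at the lowest level that
	 * still sees the full set of capacities (the 0-7 one here).
	 */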
>
> [...]
>
> > @@ -1266,6 +1266,112 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
> >  	update_group_capacity(sd, cpu);
> >  }
> >  
> > +/**
> > + * Asymmetric CPU capacity bits
> > + */
> > +struct asym_cap_data {
> > +	struct list_head link;
> > +	unsigned long capacity;
> > +	struct cpumask *cpu_mask;
>
> Not sure if this has been discussed already but shouldn't the flexible
> array members' approach known from struct sched_group, struct
> sched_domain or struct em_perf_domain be used here?
> IIRC, the last time this was discussed was in this thread:
> https://lkml.kernel.org/r/20200910054203.525420-2-aubrey.li@xxxxxxxxx
>
If I got the discussion you pointed to right, it was about using
cpumask_var_t, which is not the case here. I do not mind moving the code
to use the flexible array, but I am not sure it changes much. Looking at
the code that supports it (to_cpumask, namely), it was introduced for
cases where cpumask_var_t was not appropriate, which again is not the
case here.
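
For reference, a minimal sketch of how the two variants compare (the
allocation site below is my paraphrase of what v5 does, not a verbatim
copy of the patch):

	/* Variant A (v5): explicit pointer, fixed up at allocation
	 * time to address the storage placed right behind the struct.
	 */
	struct asym_cap_data {
		struct list_head link;
		unsigned long capacity;
		struct cpumask *cpu_mask;
	};

	/* sizing assumed here: struct plus one cpumask */
	entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL);
	if (entry)
		entry->cpu_mask = (struct cpumask *)((char *)entry +
						     sizeof(*entry));

	/* Variant B (your suggestion): flexible array member, same
	 * allocation size, no pointer to fix up, but every access has
	 * to go through to_cpumask(entry->cpumask).
	 */
	struct asym_cap_data {
		struct list_head link;
		unsigned long capacity;
		unsigned long cpumask[];
	};

	entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL);

to_cpumask() itself (include/linux/cpumask.h) is just a type-checked
cast, so there should be no functional difference between the two.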
---
BR
B.
> diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
> index 0de6eef91bc8..03e492e91bd7 100644
> --- a/kernel/sched/topology.c
> +++ b/kernel/sched/topology.c
> @@ -1271,8 +1271,8 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
>   */
>  struct asym_cap_data {
>  	struct list_head link;
> -	unsigned long capacity;
> -	struct cpumask *cpu_mask;
> +	unsigned long capacity;
> +	unsigned long cpumask[];
>  };
>  
>  /*
> @@ -1299,14 +1299,14 @@ asym_cpu_capacity_classify(struct sched_domain *sd,
>  		goto leave;
>  
>  	list_for_each_entry(entry, &asym_cap_list, link) {
> -		if (cpumask_intersects(sched_domain_span(sd), entry->cpu_mask)) {
> +		if (cpumask_intersects(sched_domain_span(sd), to_cpumask(entry->cpumask))) {
>  			++asym_cap_count;
>  		} else {
>  			/*
>  			 * CPUs with given capacity might be offline
>  			 * so make sure this is not the case
>  			 */
> -			if (cpumask_intersects(entry->cpu_mask, cpu_map)) {
> +			if (cpumask_intersects(to_cpumask(entry->cpumask), cpu_map)) {
>  				sd_asym_flags &= ~SD_ASYM_CPUCAPACITY_FULL;
>  				if (asym_cap_count > 1)
>  					break;
> @@ -1332,7 +1332,6 @@ asym_cpu_capacity_get_data(unsigned long capacity)
>  	if (WARN_ONCE(!entry, "Failed to allocate memory for asymmetry data\n"))
>  		goto done;
>  	entry->capacity = capacity;
> -	entry->cpu_mask = (struct cpumask *)((char *)entry + sizeof(*entry));
>  	list_add(&entry->link, &asym_cap_list);
>  done:
>  	return entry;
> @@ -1349,7 +1348,7 @@ static void asym_cpu_capacity_scan(void)
>  	int cpu;
>  
>  	list_for_each_entry(entry, &asym_cap_list, link)
> -		cpumask_clear(entry->cpu_mask);
> +		cpumask_clear(to_cpumask(entry->cpumask));
>  
>  	entry = list_first_entry_or_null(&asym_cap_list,
>  					 struct asym_cap_data, link);
> @@ -1361,11 +1360,11 @@ static void asym_cpu_capacity_scan(void)
>  		if (!entry || capacity != entry->capacity)
>  			entry = asym_cpu_capacity_get_data(capacity);
>  		if (entry)
> -			__cpumask_set_cpu(cpu, entry->cpu_mask);
> +			__cpumask_set_cpu(cpu, to_cpumask(entry->cpumask));
>  	}
>  
>  	list_for_each_entry_safe(entry, next, &asym_cap_list, link) {
> -		if (cpumask_empty(entry->cpu_mask)) {
> +		if (cpumask_empty(to_cpumask(entry->cpumask))) {
>  			list_del(&entry->link);
>  			kfree(entry);
>  		}
>
> [...]