Re: [RFC PATCH v4] sched/fair: select idle cpu from idle cpumask for task wakeup
From: Vincent Guittot
Date: Thu Nov 19 2020 - 03:19:44 EST
On Thu, 19 Nov 2020 at 02:34, Li, Aubrey <aubrey.li@xxxxxxxxxxxxxxx> wrote:
>
> Hi Vincent,
>
> On 2020/11/18 21:36, Vincent Guittot wrote:
> > On Wed, 18 Nov 2020 at 04:48, Aubrey Li <aubrey.li@xxxxxxxxxxxxxxx> wrote:
> >>
> >> From: Aubrey Li <aubrey.li@xxxxxxxxx>
> >>
> >> Add an idle cpumask to track idle CPUs in the sched domain. When a
> >> CPU enters idle, if the idle driver indicates to stop the tick, the
> >> CPU is set in the idle cpumask to be a wakeup target. And if the
> >> CPU is not idle, it is cleared from the idle cpumask during the
> >> scheduler tick, to rate-limit idle cpumask updates.
> >>
> >> When a task wakes up and selects an idle CPU, scanning the idle
> >> cpumask has lower cost than scanning all the CPUs in the last level
> >> cache domain, especially when the system is heavily loaded.
> >>
> >> Benchmarks were run on an x86 4-socket system with 24 cores per
> >> socket and 2 hyperthreads per core, 192 CPUs in total. Hackbench
> >> and schbench show no notable change; uperf shows:
> >>
> >> uperf throughput: netperf workload, tcp_nodelay, r/w size = 90
> >>
> >> threads baseline-avg %std patch-avg %std
> >> 96 1 0.83 1.23 3.27
> >> 144 1 1.03 1.67 2.67
> >> 192 1 0.69 1.81 3.59
> >> 240 1 2.84 1.51 2.67
> >>
> >> Cc: Mel Gorman <mgorman@xxxxxxx>
> >> Cc: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
> >> Cc: Qais Yousef <qais.yousef@xxxxxxx>
> >> Cc: Valentin Schneider <valentin.schneider@xxxxxxx>
> >> Cc: Jiang Biao <benbjiang@xxxxxxxxx>
> >> Cc: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
> >> Signed-off-by: Aubrey Li <aubrey.li@xxxxxxxxxxxxxxx>
> >> ---
> >> include/linux/sched/topology.h | 13 +++++++++
> >> kernel/sched/core.c | 2 ++
> >> kernel/sched/fair.c | 52 +++++++++++++++++++++++++++++++++-
> >> kernel/sched/idle.c | 7 +++--
> >> kernel/sched/sched.h | 2 ++
> >> kernel/sched/topology.c | 3 +-
> >> 6 files changed, 74 insertions(+), 5 deletions(-)
> >>
> >> diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
> >> index 820511289857..b47b85163607 100644
> >> --- a/include/linux/sched/topology.h
> >> +++ b/include/linux/sched/topology.h
> >> @@ -65,8 +65,21 @@ struct sched_domain_shared {
> >> atomic_t ref;
> >> atomic_t nr_busy_cpus;
> >> int has_idle_cores;
> >> + /*
> >> + * Span of all idle CPUs in this domain.
> >> + *
> >> + * NOTE: this field is variable length. (Allocated dynamically
> >> + * by attaching extra space to the end of the structure,
> >> + * depending on how many CPUs the kernel has booted up with)
> >> + */
> >> + unsigned long idle_cpus_span[];
> >> };
> >>
> >> +static inline struct cpumask *sds_idle_cpus(struct sched_domain_shared *sds)
> >> +{
> >> + return to_cpumask(sds->idle_cpus_span);
> >> +}
> >> +
> >> struct sched_domain {
> >> /* These fields must be setup */
> >> struct sched_domain __rcu *parent; /* top domain must be null terminated */
> >> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> >> index b1e0da56abca..c86ae0495163 100644
> >> --- a/kernel/sched/core.c
> >> +++ b/kernel/sched/core.c
> >> @@ -3994,6 +3994,7 @@ void scheduler_tick(void)
> >> rq_lock(rq, &rf);
> >>
> >> update_rq_clock(rq);
> >> + update_idle_cpumask(rq, false);
> >> thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
> >> update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
> >> curr->sched_class->task_tick(rq, curr, 0);
> >> @@ -7192,6 +7193,7 @@ void __init sched_init(void)
> >> rq_csd_init(rq, &rq->nohz_csd, nohz_csd_func);
> >> #endif
> >> #endif /* CONFIG_SMP */
> >> + rq->last_idle_state = 1;
> >> hrtick_rq_init(rq);
> >> atomic_set(&rq->nr_iowait, 0);
> >> }
> >> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> >> index 48a6d442b444..d67fba5e406b 100644
> >> --- a/kernel/sched/fair.c
> >> +++ b/kernel/sched/fair.c
> >> @@ -6145,7 +6145,12 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
> >>
> >> time = cpu_clock(this);
> >>
> >> - cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
> >> + /*
> >> + * sched_domain_shared is set only at the shared cache
> >> + * level; this works only because select_idle_cpu() is
> >> + * called with sd_llc.
> >> + */
> >> + cpumask_and(cpus, sds_idle_cpus(sd->shared), p->cpus_ptr);
> >>
> >> for_each_cpu_wrap(cpu, cpus, target) {
> >> if (!--nr)
> >> @@ -6807,6 +6812,51 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
> >> }
> >> #endif /* CONFIG_SMP */
> >>
> >> +/*
> >> + * Update cpu idle state and record this information
> >> + * in sd_llc_shared->idle_cpus_span.
> >> + */
> >> +void update_idle_cpumask(struct rq *rq, bool set_idle)
> >> +{
> >> + struct sched_domain *sd;
> >> + int cpu = cpu_of(rq);
> >> + int idle_state;
> >> +
> >> + /*
> >> + * If called from the scheduler tick, only update the
> >> + * idle cpumask if the CPU is busy, as the idle cpumask
> >> + * is also updated on idle entry.
> >> + */
> >> + if (!set_idle && idle_cpu(cpu))
> >> + return;
> >> + /*
> >> + * Also set SCHED_IDLE cpu in idle cpumask to
> >> + * allow SCHED_IDLE cpu as a wakeup target
> >> + */
> >> + idle_state = set_idle || sched_idle_cpu(cpu);
> >> + /*
> >> + * No need to update idle cpumask if the state
> >> + * does not change.
> >> + */
> >> + if (rq->last_idle_state == idle_state)
> >> + return;
> >> +
> >> + rcu_read_lock();
> >> + sd = rcu_dereference(per_cpu(sd_llc, cpu));
> >> + if (!sd || !sd->shared)
> >> + goto unlock;
> >> +
> >> + if (idle_state)
> >> + cpumask_set_cpu(cpu, sds_idle_cpus(sd->shared));
> >> + else
> >> + cpumask_clear_cpu(cpu, sds_idle_cpus(sd->shared));
> >> +
> >> + rq->last_idle_state = idle_state;
> >> +unlock:
> >> + rcu_read_unlock();
> >> +}
> >> +
> >> static unsigned long wakeup_gran(struct sched_entity *se)
> >> {
> >> unsigned long gran = sysctl_sched_wakeup_granularity;
> >> diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
> >> index f324dc36fc43..0bd83c00c22a 100644
> >> --- a/kernel/sched/idle.c
> >> +++ b/kernel/sched/idle.c
> >> @@ -164,7 +164,7 @@ static void cpuidle_idle_call(void)
> >>
> >> if (cpuidle_not_available(drv, dev)) {
> >> tick_nohz_idle_stop_tick();
> >> -
> >> + update_idle_cpumask(this_rq(), true);
> >> default_idle_call();
> >> goto exit_idle;
> >> }
> >> @@ -205,9 +205,10 @@ static void cpuidle_idle_call(void)
> >> */
> >> next_state = cpuidle_select(drv, dev, &stop_tick);
> >>
> >> - if (stop_tick || tick_nohz_tick_stopped())
> >> + if (stop_tick || tick_nohz_tick_stopped()) {
> >> + update_idle_cpumask(this_rq(), true);
> >> tick_nohz_idle_stop_tick();
> >
> > Please keep the same ordering here as above:
> > tick_nohz_idle_stop_tick();
> > update_idle_cpumask(this_rq(), true);
>
> okay, will do, thanks!
>
> >
> > Also, it might be good to call update_idle_cpumask() everywhere
> > tick_nohz_idle_stop_tick() is called. For example, the S2Idle case
> > should also set the mask.
>
> Hmm, I did think about this, but I'm not sure what the point is of
> setting the mask when the whole system suspends to idle and all user
> space is frozen?
When the system is in the suspend-to-idle state, the CPU is idle. It
would be good to set it in the mask so it can be used after resume
without waiting for an idle load balance to pull a task, if any.
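
Something like this in the s2idle/forced-idle branch of
cpuidle_idle_call(), right after the existing
tick_nohz_idle_stop_tick() call there (untested sketch, keeping the
same ordering as above):

	tick_nohz_idle_stop_tick();
	/* CPU is going fully idle for s2idle; advertise it as idle too */
	update_idle_cpumask(this_rq(), true);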
>
> Also, tick_nohz_idle_stop_tick() is called in cpuidle_idle_call() when
> the CPU is offline; I think adding an offlined CPU to the idle cpumask
> may not be right.
Yes, it should not be set when the CPU is being offlined.
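
update_idle_cpumask() could simply bail out when the CPU is not
active; something like this at the top of the function (sketch only,
assuming cpu_active() is the right test for the offlining case):

	/* don't advertise a CPU that is going offline (sketch) */
	if (set_idle && !cpu_active(cpu_of(rq)))
		return;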
>
> >
> >
> >> - else
> >> + } else
> >
> > Also add { } in the else
> >
>
> Thanks, will add it.
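
With the reordering above folded in as well, the hunk should then
read something like:

	if (stop_tick || tick_nohz_tick_stopped()) {
		tick_nohz_idle_stop_tick();
		update_idle_cpumask(this_rq(), true);
	} else {
		tick_nohz_idle_retain_tick();
	}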
>
> -Aubrey