Re: [PATCH v4 4/4] sched/fair: Check the SD_ASYM_PACKING flag in sched_use_asym_prio()
From: Ricardo Neri
Date: Fri Feb 09 2024 - 08:29:27 EST
On Fri, Feb 09, 2024 at 07:12:10PM +0800, kuiliang Shi wrote:
> Hi Valentin & Ricardo,
>
> Any more comments on this patch? Or a Reviewed-by from you as a Chinese New Year gift? :)
I will give you a Tested-by tag ;). I have started testing your patches, but
I am not done yet.
>
> Thanks
> Alex
>
> On 2/7/24 11:47 AM, alexs@xxxxxxxxxx wrote:
> > From: Alex Shi <alexs@xxxxxxxxxx>
> >
> > sched_use_asym_prio() checks whether CPU priorities should be used. It
> > makes sense to check for the SD_ASYM_PACKING flag inside the function.
> > Since both sched_asym() and sched_group_asym() use sched_use_asym_prio(),
> > remove the now-superfluous checks for the flag in the various callers.
> >
> > Signed-off-by: Alex Shi <alexs@xxxxxxxxxx>
> > To: linux-kernel@xxxxxxxxxxxxxxx
> > To: Ricardo Neri <ricardo.neri-calderon@xxxxxxxxxxxxxxx>
> > To: Ben Segall <bsegall@xxxxxxxxxx>
> > To: Steven Rostedt <rostedt@xxxxxxxxxxx>
> > To: Dietmar Eggemann <dietmar.eggemann@xxxxxxx>
> > To: Valentin Schneider <vschneid@xxxxxxxxxx>
> > To: Daniel Bristot de Oliveira <bristot@xxxxxxxxxx>
> > To: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
> > To: Juri Lelli <juri.lelli@xxxxxxxxxx>
> > To: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
> > To: Ingo Molnar <mingo@xxxxxxxxxx>
> > ---
> > kernel/sched/fair.c | 16 +++++++---------
> > 1 file changed, 7 insertions(+), 9 deletions(-)
> >
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index 942b6358f683..10ae28e1c088 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -9740,6 +9740,9 @@ group_type group_classify(unsigned int imbalance_pct,
> > */
> > static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
> > {
> > + if (!(sd->flags & SD_ASYM_PACKING))
> > + return false;
> > +
> > if (!sched_smt_active())
> > return true;
> >
> > @@ -9941,11 +9944,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
> > sgs->group_weight = group->group_weight;
> >
> > /* Check if dst CPU is idle and preferred to this group */
> > - if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
> > - env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
> > - sched_group_asym(env, sgs, group)) {
> > + if (!local_group && env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
> > + sched_group_asym(env, sgs, group))
> > sgs->group_asym_packing = 1;
> > - }
> >
> > /* Check for loaded SMT group to be balanced to dst CPU */
> > if (!local_group && smt_balance(env, sgs, group))
> > @@ -11041,9 +11042,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
> > * If balancing between cores, let lower priority CPUs help
> > * SMT cores with more than one busy sibling.
> > */
> > - if ((env->sd->flags & SD_ASYM_PACKING) &&
> > - sched_asym(env->sd, i, env->dst_cpu) &&
> > - nr_running == 1)
> > + if (sched_asym(env->sd, i, env->dst_cpu) && nr_running == 1)
> > continue;
> >
> > switch (env->migration_type) {
> > @@ -11139,8 +11138,7 @@ asym_active_balance(struct lb_env *env)
> > * the lower priority @env::dst_cpu help it. Do not follow
> > * CPU priority.
> > */
> > - return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
> > - sched_use_asym_prio(env->sd, env->dst_cpu) &&
> > + return env->idle != CPU_NOT_IDLE && sched_use_asym_prio(env->sd, env->dst_cpu) &&
> > (sched_asym_prefer(env->dst_cpu, env->src_cpu) ||
> > !sched_use_asym_prio(env->sd, env->src_cpu));
> > }