Re: [PATCH v3 3/4] sched/fair: packing func sched_use_asym_prio()/sched_asym_prefer()

From: kuiliang Shi
Date: Sun Feb 04 2024 - 06:52:45 EST


Hi Ricardo,

Since your suggestions have been incorporated into this and the next patch, would you mind giving your Reviewed-by to both of them?

Thanks
Alex

On 2/1/24 7:54 PM, alexs@xxxxxxxxxx wrote:
> From: Alex Shi <alexs@xxxxxxxxxx>
>
> Consolidate the functions sched_use_asym_prio() and sched_asym_prefer()
> into one, and rename sched_asym() to sched_group_asym().
> This makes the code easier to read. No functional changes.
>
> Signed-off-by: Alex Shi <alexs@xxxxxxxxxx>
> To: Ricardo Neri <ricardo.neri-calderon@xxxxxxxxxxxxxxx>
> To: Valentin Schneider <vschneid@xxxxxxxxxx>
> To: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
> To: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
> To: Ingo Molnar <mingo@xxxxxxxxxx>
> ---
> kernel/sched/fair.c | 35 ++++++++++++++++++-----------------
> 1 file changed, 18 insertions(+), 17 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 8d70417f5125..44fd5e2ca642 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9747,8 +9747,15 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
>  	return sd->flags & SD_SHARE_CPUCAPACITY || is_core_idle(cpu);
>  }
>
> +static inline bool sched_asym(struct sched_domain *sd, int dst_cpu, int src_cpu)
> +{
> +	/* Check if asym balance applicable, then check priorities. */
> +	return sched_use_asym_prio(sd, dst_cpu) &&
> +		sched_asym_prefer(dst_cpu, src_cpu);
> +}
> +
>  /**
> - * sched_asym - Check if the destination CPU can do asym_packing load balance
> + * sched_group_asym - Check if the destination CPU can do asym_packing balance
>   * @env: The load balancing environment
>   * @sgs: Load-balancing statistics of the candidate busiest group
>   * @group: The candidate busiest group
> @@ -9768,22 +9775,18 @@ static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
>   * otherwise.
>   */
>  static inline bool
> -sched_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group)
> +sched_group_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group *group)
>  {
> -	/* Ensure that the whole local core is idle, if applicable. */
> -	if (!sched_use_asym_prio(env->sd, env->dst_cpu))
> -		return false;
> -
>  	/*
> -	 * CPU priorities does not make sense for SMT cores with more than one
> +	 * CPU priorities do not make sense for SMT cores with more than one
>  	 * busy sibling.
>  	 */
> -	if (group->flags & SD_SHARE_CPUCAPACITY) {
> -		if (sgs->group_weight - sgs->idle_cpus != 1)
> -			return false;
> -	}
>
> -	return sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu);
> +	if ((group->flags & SD_SHARE_CPUCAPACITY) &&
> +	    (sgs->group_weight - sgs->idle_cpus != 1))
> +		return false;
> +
> +	return sched_asym(env->sd, env->dst_cpu, group->asym_prefer_cpu);
>  }
>
>  /* One group has more than one SMT CPU while the other group does not */
> @@ -9939,7 +9942,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
>  	/* Check if dst CPU is idle and preferred to this group */
>  	if (!local_group && env->sd->flags & SD_ASYM_PACKING &&
>  	    env->idle != CPU_NOT_IDLE && sgs->sum_h_nr_running &&
> -	    sched_asym(env, sgs, group)) {
> +	    sched_group_asym(env, sgs, group)) {
>  		sgs->group_asym_packing = 1;
>  	}
>
> @@ -11038,8 +11041,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
>  		 * SMT cores with more than one busy sibling.
>  		 */
>  		if ((env->sd->flags & SD_ASYM_PACKING) &&
> -		    sched_use_asym_prio(env->sd, i) &&
> -		    sched_asym_prefer(i, env->dst_cpu) &&
> +		    sched_asym(env->sd, i, env->dst_cpu) &&
>  		    nr_running == 1)
>  			continue;
>
> @@ -11909,8 +11911,7 @@ static void nohz_balancer_kick(struct rq *rq)
>  		 * preferred CPU must be idle.
>  		 */
>  		for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
> -			if (sched_use_asym_prio(sd, i) &&
> -			    sched_asym_prefer(i, cpu)) {
> +			if (sched_asym(sd, i, cpu)) {
>  				flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
>  				goto unlock;
>  			}