Re: [PATCH 1/2] sched/fair: Add cfs bandwidth burst statistics

From: changhuaixin
Date: Thu Aug 12 2021 - 08:18:32 EST


Ping.

The statistics code has been simplified further compared with the version discussed before. Would you mind having a look at it?

> On Jul 30, 2021, at 3:09 PM, Huaixin Chang <changhuaixin@xxxxxxxxxxxxxxxxx> wrote:
>
> Two new statistics are introduced to show the internals of the burst
> feature and explain why burst helps or not.
>
> nr_bursts:  number of periods in which a bandwidth burst occurs
> burst_usec: cumulative wall-time that any CPUs have used above
>             quota in the respective periods
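
To spell out the accounting with made-up numbers: take a group with a
100ms period, 100ms quota and 50ms burst. A period in which carried-over
runtime lets the group consume 130ms of CPU time bumps nr_bursts by one
and adds the 30ms spent above quota to burst_usec; a period that stays
at or below the 100ms quota leaves both counters untouched.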
>
> Co-developed-by: Shanpei Chen <shanpeic@xxxxxxxxxxxxxxxxx>
> Signed-off-by: Shanpei Chen <shanpeic@xxxxxxxxxxxxxxxxx>
> Co-developed-by: Tianchen Ding <dtcccc@xxxxxxxxxxxxxxxxx>
> Signed-off-by: Tianchen Ding <dtcccc@xxxxxxxxxxxxxxxxx>
> Signed-off-by: Huaixin Chang <changhuaixin@xxxxxxxxxxxxxxxxx>
> ---
> kernel/sched/core.c | 13 ++++++++++---
> kernel/sched/fair.c | 9 +++++++++
> kernel/sched/sched.h | 3 +++
> 3 files changed, 22 insertions(+), 3 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 2d9ff40f4661..9a286c8a1354 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -10088,6 +10088,9 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
> seq_printf(sf, "wait_sum %llu\n", ws);
> }
>
> + seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
> + seq_printf(sf, "burst_usec %llu\n", cfs_b->burst_time);
> +
> return 0;
> }
> #endif /* CONFIG_CFS_BANDWIDTH */
> @@ -10184,16 +10187,20 @@ static int cpu_extra_stat_show(struct seq_file *sf,
> 	{
> 		struct task_group *tg = css_tg(css);
> 		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
> -		u64 throttled_usec;
> +		u64 throttled_usec, burst_usec;
>
> 		throttled_usec = cfs_b->throttled_time;
> 		do_div(throttled_usec, NSEC_PER_USEC);
> +		burst_usec = cfs_b->burst_time;
> +		do_div(burst_usec, NSEC_PER_USEC);
>
> 		seq_printf(sf, "nr_periods %d\n"
> 			   "nr_throttled %d\n"
> -			   "throttled_usec %llu\n",
> +			   "throttled_usec %llu\n"
> +			   "nr_bursts %d\n"
> +			   "burst_usec %llu\n",
> 			   cfs_b->nr_periods, cfs_b->nr_throttled,
> -			   throttled_usec);
> +			   throttled_usec, cfs_b->nr_burst, burst_usec);
> 	}
> #endif
> 	return 0;
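
For reference, with this patch the bandwidth part of a cgroup v2
cpu.stat looks like the following (numbers are made up; the
usage_usec/user_usec/system_usec lines emitted by the cgroup core are
omitted):

	nr_periods 1000
	nr_throttled 20
	throttled_usec 150000
	nr_bursts 35
	burst_usec 420000

The cgroup v1 cpu.stat file gains the corresponding counters as well.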
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 44c452072a1b..464371f364f1 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -4655,11 +4655,20 @@ static inline u64 sched_cfs_bandwidth_slice(void)
>  */
> void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
> {
> +	s64 runtime;
> +
> 	if (unlikely(cfs_b->quota == RUNTIME_INF))
> 		return;
>
> 	cfs_b->runtime += cfs_b->quota;
> +	runtime = cfs_b->runtime_snap - cfs_b->runtime;
> +	if (runtime > 0) {
> +		cfs_b->burst_time += runtime;
> +		cfs_b->nr_burst++;
> +	}
> +
> 	cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst);
> +	cfs_b->runtime_snap = cfs_b->runtime;
> }
>
> static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
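
A note for reviewers of this hunk: runtime_snap records what was left in
the pool right after the previous refill, so once a new quota has been
added, runtime_snap - runtime equals (runtime consumed since the last
refill) - quota. It is therefore positive exactly when the group ran on
burst budget, and its value is the wall-time spent above quota. Below is
a rough userspace sketch of the same accounting; it is plain C, uses
microseconds instead of the kernel's nanoseconds, and struct cfs_bw and
refill() are made-up stand-ins rather than kernel code:

#include <stdio.h>
#include <stdint.h>

/* Cut-down stand-in for the bandwidth fields touched by the patch. */
struct cfs_bw {
	int64_t quota;		/* runtime added every period            */
	int64_t burst;		/* extra runtime the pool may carry over */
	int64_t runtime;	/* runtime currently left in the pool    */
	int64_t runtime_snap;	/* pool content right after last refill  */
	int64_t burst_time;	/* total time spent above quota          */
	int	nr_burst;	/* number of periods that burst          */
};

/* Mirrors the refill logic of __refill_cfs_bandwidth_runtime() above. */
static void refill(struct cfs_bw *b)
{
	int64_t over;

	b->runtime += b->quota;
	/*
	 * runtime_snap - runtime == consumed - quota: positive only when
	 * more than one quota's worth of runtime was used since the
	 * previous refill.
	 */
	over = b->runtime_snap - b->runtime;
	if (over > 0) {
		b->burst_time += over;
		b->nr_burst++;
	}
	if (b->runtime > b->quota + b->burst)
		b->runtime = b->quota + b->burst;
	b->runtime_snap = b->runtime;
}

int main(void)
{
	/* 100ms quota, 50ms burst; everything in microseconds here. */
	struct cfs_bw b = { .quota = 100000, .burst = 50000,
			    .runtime = 100000, .runtime_snap = 100000 };

	b.runtime -= 60000;	/* period 1: 60ms used, 40ms left over   */
	refill(&b);		/* pool grows to 140ms, no burst         */

	b.runtime -= 130000;	/* period 2: 130ms used, 30ms over quota */
	refill(&b);		/* burst recorded                        */

	printf("nr_bursts %d\nburst_usec %lld\n",
	       b.nr_burst, (long long)b.burst_time);
	return 0;
}

Compiled and run, this prints nr_bursts 1 and burst_usec 30000, i.e.
exactly the 30ms the second period spent above its quota.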
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 14a41a243f7b..80e4322727b4 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -367,6 +367,7 @@ struct cfs_bandwidth {
> 	u64			quota;
> 	u64			runtime;
> 	u64			burst;
> +	u64			runtime_snap;
> 	s64			hierarchical_quota;
>
> 	u8			idle;
> @@ -379,7 +380,9 @@ struct cfs_bandwidth {
> 	/* Statistics: */
> 	int			nr_periods;
> 	int			nr_throttled;
> +	int			nr_burst;
> 	u64			throttled_time;
> +	u64			burst_time;
> #endif
> };
>
> --
> 2.14.4.44.g2045bb6
>
>