Re: [PATCH v2 2/2] sched/uclamp: Add uclamp_is_used() check before enabling it
From: Vincent Guittot
Date: Wed Feb 19 2025 - 11:29:14 EST
On Wed, 19 Feb 2025 at 10:40, Xuewen Yan <xuewen.yan@xxxxxxxxxx> wrote:
>
> static_branch_enable() takes cpus_read_lock(), and users may set the
> uclamp value of tasks frequently, so this path would call
> static_branch_enable(), and therefore cpus_read_lock(), over and over.
> Add an uclamp_is_used() check so that the lock is only taken the first
> time uclamp is enabled.
> To keep the code concise, add a helper function that encapsulates this
> and use it everywhere we enable sched_uclamp_used.
>
> Signed-off-by: Xuewen Yan <xuewen.yan@xxxxxxxxxx>
Apart from the small comment below:
Reviewed-by: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
> ---
> V2:
> - reworded parts of the commit message;
> - added a helper function and used it everywhere uclamp is enabled (Vincent)
> ---
> kernel/sched/core.c | 6 +++---
> kernel/sched/sched.h | 14 ++++++++++++++
> kernel/sched/syscalls.c | 2 +-
> 3 files changed, 18 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 38a7192bfc19..0466a2f61b99 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -1942,12 +1942,12 @@ static int sysctl_sched_uclamp_handler(const struct ctl_table *table, int write,
> }
>
> if (update_root_tg) {
> - static_branch_enable(&sched_uclamp_used);
> + sched_uclamp_enable();
> uclamp_update_root_tg();
> }
>
> if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) {
> - static_branch_enable(&sched_uclamp_used);
> + sched_uclamp_enable();
> uclamp_sync_util_min_rt_default();
> }
>
> @@ -9295,7 +9295,7 @@ static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
> if (req.ret)
> return req.ret;
>
> - static_branch_enable(&sched_uclamp_used);
> + sched_uclamp_enable();
>
> guard(mutex)(&uclamp_mutex);
> guard(rcu)();
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index 3624fdce5536..bd22af347d0b 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -3407,6 +3407,18 @@ static inline bool uclamp_is_used(void)
> return static_branch_likely(&sched_uclamp_used);
> }
>
> +/*
> + * Enabling static branches would get the cpus_read_lock(),
> + * check whether uclamp_is_used before enable it to avoid always
> + * calling cpus_read_lock(). Because we never disable this
> + * static key once enable it.
Maybe simply:
Enabling the static branch takes cpus_read_lock(); check
uclamp_is_used() before enabling it. There is no race issue
because we never disable this static key once it is enabled.
> + */
> +static inline void sched_uclamp_enable(void)
> +{
> + if (!uclamp_is_used())
> + static_branch_enable(&sched_uclamp_used);
> +}
> +
> static inline unsigned long uclamp_rq_get(struct rq *rq,
> enum uclamp_id clamp_id)
> {
> @@ -3486,6 +3498,8 @@ static inline bool uclamp_is_used(void)
> return false;
> }
>
> +static inline void sched_uclamp_enable(void) {}
> +
> static inline unsigned long
> uclamp_rq_get(struct rq *rq, enum uclamp_id clamp_id)
> {
> diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
> index 456d339be98f..9100a77e9d79 100644
> --- a/kernel/sched/syscalls.c
> +++ b/kernel/sched/syscalls.c
> @@ -368,7 +368,7 @@ static int uclamp_validate(struct task_struct *p,
> * blocking operation which obviously cannot be done while holding
> * scheduler locks.
> */
> - static_branch_enable(&sched_uclamp_used);
> + sched_uclamp_enable();
>
> return 0;
> }
> --
> 2.25.1
>
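For reference, here is a minimal, illustrative sketch of the "check before
enable" pattern this patch applies, written as a stand-alone user-space
analogue (C11 atomics plus pthreads) rather than as kernel code. The names
feature_used, feature_is_used() and feature_enable() are hypothetical
stand-ins for sched_uclamp_used, uclamp_is_used() and sched_uclamp_enable(),
and the mutex stands in for cpus_read_lock(). The racy pre-check is safe for
the same reason as in the patch: the flag is never cleared once set and the
slow path is idempotent, so the worst case is that two concurrent callers
both take the lock once.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Analogue of sched_uclamp_used: set once, never cleared. */
static atomic_bool feature_used;

/* Analogue of cpus_read_lock(): the expensive part of enabling. */
static pthread_mutex_t enable_lock = PTHREAD_MUTEX_INITIALIZER;

/* Cheap fast-path check, analogous to uclamp_is_used(). */
static int feature_is_used(void)
{
	return atomic_load_explicit(&feature_used, memory_order_acquire);
}

/* Analogue of sched_uclamp_enable(): skip the lock if already enabled. */
static void feature_enable(void)
{
	if (feature_is_used())
		return;

	pthread_mutex_lock(&enable_lock);
	atomic_store_explicit(&feature_used, 1, memory_order_release);
	pthread_mutex_unlock(&enable_lock);
}

int main(void)
{
	feature_enable();	/* first call pays for the lock */
	feature_enable();	/* later calls return after a plain load */
	printf("feature used: %d\n", feature_is_used());
	return 0;
}

Build with "cc -std=c11 -pthread"; repeated calls to feature_enable() hit
only the plain load, which mirrors why the uclamp_is_used() check avoids
taking cpus_read_lock() on every uclamp update.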