Re: [PATCH v1 27/31] x86/resctrl: Rename resctrl_sched_in() to begin resctrl_arch_

From: Moger, Babu
Date: Mon Apr 15 2024 - 16:43:46 EST

On 3/21/24 11:51, James Morse wrote:
> resctrl_sched_in() loads the architecture specific CPU MSRs with the
> CLOSID and RMID values. This function was named before resctrl was
> split to have architecture specific code, and generic filesystem code.
>
> This function is obviously architecture specific, but does not begin
> with 'resctrl_arch_', making it the odd one out in the functions an
> architecture needs to support to enable resctrl.
>
> Rename it for concistency. This is purely cosmetic.

s/concistency/consistency/
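
For anyone reading the series without the earlier patches to hand: the function
being renamed is a thin wrapper around the PQR MSR write. A minimal sketch of
the idea, assuming the documented MSR_IA32_PQR_ASSOC layout (RMID in the low
32 bits, CLOSID in the high 32 bits) and eliding the per-CPU default and
caching logic that the real __resctrl_sched_in() has:

    #include <linux/jump_label.h>
    #include <linux/sched.h>
    #include <asm/msr.h>
    #include <asm/resctrl.h>

    /*
     * Sketch only, not the kernel's exact implementation: on context
     * switch, load this CPU's QOS MSR with the incoming task's RMID
     * and CLOSID. The static branch keeps this near-free when resctrl
     * is not in use.
     */
    static inline void resctrl_arch_sched_in(struct task_struct *tsk)
    {
            if (static_branch_likely(&rdt_enable_key))
                    wrmsr(MSR_IA32_PQR_ASSOC,
                          READ_ONCE(tsk->rmid),    /* low word: monitoring ID */
                          READ_ONCE(tsk->closid)); /* high word: control ID */
    }
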

>
> Signed-off-by: James Morse <james.morse@xxxxxxx>
> ---
> arch/x86/include/asm/resctrl.h | 4 ++--
> arch/x86/kernel/cpu/resctrl/rdtgroup.c | 12 ++++++------
> arch/x86/kernel/process_32.c | 2 +-
> arch/x86/kernel/process_64.c | 2 +-
> 4 files changed, 10 insertions(+), 10 deletions(-)
>
> diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h
> index 9940398e367e..491342f56811 100644
> --- a/arch/x86/include/asm/resctrl.h
> +++ b/arch/x86/include/asm/resctrl.h
> @@ -177,7 +177,7 @@ static inline bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 ignored,
> return READ_ONCE(tsk->rmid) == rmid;
> }
>
> -static inline void resctrl_sched_in(struct task_struct *tsk)
> +static inline void resctrl_arch_sched_in(struct task_struct *tsk)
> {
> if (static_branch_likely(&rdt_enable_key))
> __resctrl_sched_in(tsk);
> @@ -220,7 +220,7 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c);
>
> #else
>
> -static inline void resctrl_sched_in(struct task_struct *tsk) {}
> +static inline void resctrl_arch_sched_in(struct task_struct *tsk) {}
> static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {}
>
> #endif /* CONFIG_X86_CPU_RESCTRL */
> diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> index 085fb9c2333a..218aebd6387f 100644
> --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> @@ -359,7 +359,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
> }
>
> /*
> - * This is safe against resctrl_sched_in() called from __switch_to()
> + * This is safe against resctrl_arch_sched_in() called from __switch_to()
> * because __switch_to() is executed with interrupts disabled. A local call
> * from update_closid_rmid() is protected against __switch_to() because
> * preemption is disabled.
> @@ -378,7 +378,7 @@ void resctrl_arch_sync_cpu_defaults(void *info)
> * executing task might have its own closid selected. Just reuse
> * the context switch code.
> */
> - resctrl_sched_in(current);
> + resctrl_arch_sched_in(current);
> }
>
> /*
> @@ -605,7 +605,7 @@ static void _update_task_closid_rmid(void *task)
> * Otherwise, the MSR is updated when the task is scheduled in.
> */
> if (task == current)
> - resctrl_sched_in(task);
> + resctrl_arch_sched_in(task);
> }
>
> static void update_task_closid_rmid(struct task_struct *t)
> @@ -663,7 +663,7 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
> * Ensure the task's closid and rmid are written before determining if
> * the task is current that will decide if it will be interrupted.
> * This pairs with the full barrier between the rq->curr update and
> - * resctrl_sched_in() during context switch.
> + * resctrl_arch_sched_in() during context switch.
> */
> smp_mb();
>
> @@ -2946,8 +2946,8 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
> /*
> * Order the closid/rmid stores above before the loads
> * in task_curr(). This pairs with the full barrier
> - * between the rq->curr update and resctrl_sched_in()
> - * during context switch.
> + * between the rq->curr update and
> + * resctrl_arch_sched_in() during context switch.
> */
> smp_mb();
>
> diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
> index 0917c7f25720..8697b02dabf1 100644
> --- a/arch/x86/kernel/process_32.c
> +++ b/arch/x86/kernel/process_32.c
> @@ -211,7 +211,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
> switch_fpu_finish(next_p);
>
> /* Load the Intel cache allocation PQR MSR. */
> - resctrl_sched_in(next_p);
> + resctrl_arch_sched_in(next_p);
>
> return prev_p;
> }
> diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
> index 7062b84dd467..d1cf885e8930 100644
> --- a/arch/x86/kernel/process_64.c
> +++ b/arch/x86/kernel/process_64.c
> @@ -707,7 +707,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
> }
>
> /* Load the Intel cache allocation PQR MSR. */
> - resctrl_sched_in(next_p);
> + resctrl_arch_sched_in(next_p);
>
> return prev_p;
> }
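
One note on the smp_mb() comments updated above, since the rename touches them
in three places: the ordering contract itself is unchanged. Roughly, as a
sketch of the pairing rather than the exact code:

    /*
     * Simplified view of the ordering referenced in the comments above
     * (the real code is in __rdtgroup_move_task(), rdt_move_group_tasks()
     * and the context-switch path):
     *
     *   mover CPU                         context-switch CPU
     *   ---------                         ------------------
     *   WRITE_ONCE(tsk->closid, ...);     rq->curr = next;
     *   WRITE_ONCE(tsk->rmid, ...);       smp_mb();  (full barrier)
     *   smp_mb();                         resctrl_arch_sched_in(next);
     *   if (task_curr(tsk))                 reads next->closid/rmid
     *           send IPI to that CPU;
     *
     * Either the mover observes the task as current and interrupts that
     * CPU, or the context switch observes the new closid/rmid; the
     * barriers ensure no update is lost. The rename does not alter this.
     */
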

--
Thanks
Babu Moger