Re: [PATCH v4 03/11] perf/x86/rapl: Rename rapl_pmu variables
From: Zhang, Rui
Date: Thu Jul 11 2024 - 22:14:48 EST
On Thu, 2024-07-11 at 10:24 +0000, Dhananjay Ugwekar wrote:
> Rename struct rapl_pmu variables from "pmu" to "rapl_pmu" to avoid
> confusion between variables of the two different structs, pmu and
> rapl_pmu. rapl_pmu also contains a pointer to struct pmu, which leads
> to code like pmu->pmu, which is needlessly confusing. With this
> change, the above is replaced with the much more readable
> rapl_pmu->pmu.
>
> Also rename the "pmus" member in the rapl_pmus struct, for the same
> reason.
>
> No functional change.
>
> Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@xxxxxxx>
Reviewed-by: Zhang Rui <rui.zhang@xxxxxxxxx>
-rui
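
For anyone skimming the thread, the readability gain boils down to the
snippet below. This is only an illustrative sketch (unrelated members
trimmed, reconstructed from the hunks quoted further down), not the
full definitions from rapl.c:

	/* per-package/die state, holding a pointer back to the core pmu */
	struct rapl_pmu {
		struct pmu	*pmu;
		/* ... lock, active_list, hrtimer, etc. ... */
	};

	/* driver-wide state embedding the perf core pmu */
	struct rapl_pmus {
		struct pmu	pmu;
		unsigned int	nr_rapl_pmu;
		struct rapl_pmu	*rapl_pmu[] __counted_by(nr_rapl_pmu);
	};

	/* before the rename: which "pmu" is which? */
	perf_pmu_migrate_context(pmu->pmu, cpu, target);

	/* after the rename: unambiguous */
	perf_pmu_migrate_context(rapl_pmu->pmu, cpu, target);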
> ---
> arch/x86/events/rapl.c | 104 ++++++++++++++++++++---------------------
> 1 file changed, 52 insertions(+), 52 deletions(-)
>
> diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
> index df71f38ad98d..e6162bd84f23 100644
> --- a/arch/x86/events/rapl.c
> +++ b/arch/x86/events/rapl.c
> @@ -123,7 +123,7 @@ struct rapl_pmu {
> struct rapl_pmus {
> struct pmu pmu;
> unsigned int nr_rapl_pmu;
> - struct rapl_pmu *pmus[] __counted_by(nr_rapl_pmu);
> + struct rapl_pmu *rapl_pmu[] __counted_by(nr_rapl_pmu);
> };
>
> enum rapl_unit_quirk {
> @@ -171,7 +171,7 @@ static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
> * The unsigned check also catches the '-1' return value for non
> * existent mappings in the topology map.
> */
> - return rapl_pmu_idx < rapl_pmus->nr_rapl_pmu ? rapl_pmus->pmus[rapl_pmu_idx] : NULL;
> + return rapl_pmu_idx < rapl_pmus->nr_rapl_pmu ? rapl_pmus->rapl_pmu[rapl_pmu_idx] : NULL;
> }
>
> static inline u64 rapl_read_counter(struct perf_event *event)
> @@ -235,34 +235,34 @@ static void rapl_start_hrtimer(struct rapl_pmu *pmu)
>
> static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
> {
> - struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
> + struct rapl_pmu *rapl_pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
> struct perf_event *event;
> unsigned long flags;
>
> - if (!pmu->n_active)
> + if (!rapl_pmu->n_active)
> return HRTIMER_NORESTART;
>
> - raw_spin_lock_irqsave(&pmu->lock, flags);
> + raw_spin_lock_irqsave(&rapl_pmu->lock, flags);
>
> - list_for_each_entry(event, &pmu->active_list, active_entry)
> + list_for_each_entry(event, &rapl_pmu->active_list, active_entry)
> rapl_event_update(event);
>
> - raw_spin_unlock_irqrestore(&pmu->lock, flags);
> + raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags);
>
> - hrtimer_forward_now(hrtimer, pmu->timer_interval);
> + hrtimer_forward_now(hrtimer, rapl_pmu->timer_interval);
>
> return HRTIMER_RESTART;
> }
>
> -static void rapl_hrtimer_init(struct rapl_pmu *pmu)
> +static void rapl_hrtimer_init(struct rapl_pmu *rapl_pmu)
> {
> - struct hrtimer *hr = &pmu->hrtimer;
> + struct hrtimer *hr = &rapl_pmu->hrtimer;
>
> hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
> hr->function = rapl_hrtimer_handle;
> }
>
> -static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
> +static void __rapl_pmu_event_start(struct rapl_pmu *rapl_pmu,
> struct perf_event *event)
> {
> if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
> @@ -270,39 +270,39 @@ static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
>
> event->hw.state = 0;
>
> - list_add_tail(&event->active_entry, &pmu->active_list);
> + list_add_tail(&event->active_entry, &rapl_pmu->active_list);
>
> local64_set(&event->hw.prev_count, rapl_read_counter(event));
>
> - pmu->n_active++;
> - if (pmu->n_active == 1)
> - rapl_start_hrtimer(pmu);
> + rapl_pmu->n_active++;
> + if (rapl_pmu->n_active == 1)
> + rapl_start_hrtimer(rapl_pmu);
> }
>
> static void rapl_pmu_event_start(struct perf_event *event, int mode)
> {
> - struct rapl_pmu *pmu = event->pmu_private;
> + struct rapl_pmu *rapl_pmu = event->pmu_private;
> unsigned long flags;
>
> - raw_spin_lock_irqsave(&pmu->lock, flags);
> - __rapl_pmu_event_start(pmu, event);
> - raw_spin_unlock_irqrestore(&pmu->lock, flags);
> + raw_spin_lock_irqsave(&rapl_pmu->lock, flags);
> + __rapl_pmu_event_start(rapl_pmu, event);
> + raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags);
> }
>
> static void rapl_pmu_event_stop(struct perf_event *event, int mode)
> {
> - struct rapl_pmu *pmu = event->pmu_private;
> + struct rapl_pmu *rapl_pmu = event->pmu_private;
> struct hw_perf_event *hwc = &event->hw;
> unsigned long flags;
>
> - raw_spin_lock_irqsave(&pmu->lock, flags);
> + raw_spin_lock_irqsave(&rapl_pmu->lock, flags);
>
> /* mark event as deactivated and stopped */
> if (!(hwc->state & PERF_HES_STOPPED)) {
> - WARN_ON_ONCE(pmu->n_active <= 0);
> - pmu->n_active--;
> - if (pmu->n_active == 0)
> - hrtimer_cancel(&pmu->hrtimer);
> + WARN_ON_ONCE(rapl_pmu->n_active <= 0);
> + rapl_pmu->n_active--;
> + if (rapl_pmu->n_active == 0)
> + hrtimer_cancel(&rapl_pmu->hrtimer);
>
> list_del(&event->active_entry);
>
> @@ -320,23 +320,23 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
> hwc->state |= PERF_HES_UPTODATE;
> }
>
> - raw_spin_unlock_irqrestore(&pmu->lock, flags);
> + raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags);
> }
>
> static int rapl_pmu_event_add(struct perf_event *event, int mode)
> {
> - struct rapl_pmu *pmu = event->pmu_private;
> + struct rapl_pmu *rapl_pmu = event->pmu_private;
> struct hw_perf_event *hwc = &event->hw;
> unsigned long flags;
>
> - raw_spin_lock_irqsave(&pmu->lock, flags);
> + raw_spin_lock_irqsave(&rapl_pmu->lock, flags);
>
> hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
>
> if (mode & PERF_EF_START)
> - __rapl_pmu_event_start(pmu, event);
> + __rapl_pmu_event_start(rapl_pmu, event);
>
> - raw_spin_unlock_irqrestore(&pmu->lock, flags);
> + raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags);
>
> return 0;
> }
> @@ -350,7 +350,7 @@ static int rapl_pmu_event_init(struct perf_event *event)
> {
> u64 cfg = event->attr.config & RAPL_EVENT_MASK;
> int bit, ret = 0;
> - struct rapl_pmu *pmu;
> + struct rapl_pmu *rapl_pmu;
>
> /* only look at RAPL events */
> if (event->attr.type != rapl_pmus->pmu.type)
> @@ -380,11 +380,11 @@ static int rapl_pmu_event_init(struct perf_event *event)
> return -EINVAL;
>
> /* must be done before validate_group */
> - pmu = cpu_to_rapl_pmu(event->cpu);
> - if (!pmu)
> + rapl_pmu = cpu_to_rapl_pmu(event->cpu);
> + if (!rapl_pmu)
> return -EINVAL;
> - event->cpu = pmu->cpu;
> - event->pmu_private = pmu;
> + event->cpu = rapl_pmu->cpu;
> + event->pmu_private = rapl_pmu;
> event->hw.event_base = rapl_msrs[bit].msr;
> event->hw.config = cfg;
> event->hw.idx = bit;
> @@ -567,22 +567,22 @@ static struct perf_msr amd_rapl_msrs[] = {
> static int rapl_cpu_offline(unsigned int cpu)
> {
> const struct cpumask *rapl_pmu_cpumask = get_rapl_pmu_cpumask(cpu);
> - struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
> + struct rapl_pmu *rapl_pmu = cpu_to_rapl_pmu(cpu);
> int target;
>
> /* Check if exiting cpu is used for collecting rapl events */
> if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
> return 0;
>
> - pmu->cpu = -1;
> + rapl_pmu->cpu = -1;
> /* Find a new cpu to collect rapl events */
> target = cpumask_any_but(rapl_pmu_cpumask, cpu);
>
> /* Migrate rapl events to the new target */
> if (target < nr_cpu_ids) {
> cpumask_set_cpu(target, &rapl_cpu_mask);
> - pmu->cpu = target;
> - perf_pmu_migrate_context(pmu->pmu, cpu, target);
> + rapl_pmu->cpu = target;
> + perf_pmu_migrate_context(rapl_pmu->pmu, cpu, target);
> }
> return 0;
> }
> @@ -591,21 +591,21 @@ static int rapl_cpu_online(unsigned int cpu)
> {
> unsigned int rapl_pmu_idx = get_rapl_pmu_idx(cpu);
> const struct cpumask *rapl_pmu_cpumask = get_rapl_pmu_cpumask(cpu);
> - struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
> + struct rapl_pmu *rapl_pmu = cpu_to_rapl_pmu(cpu);
> int target;
>
> - if (!pmu) {
> - pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
> - if (!pmu)
> + if (!rapl_pmu) {
> + rapl_pmu = kzalloc_node(sizeof(*rapl_pmu), GFP_KERNEL, cpu_to_node(cpu));
> + if (!rapl_pmu)
> return -ENOMEM;
>
> - raw_spin_lock_init(&pmu->lock);
> - INIT_LIST_HEAD(&pmu->active_list);
> - pmu->pmu = &rapl_pmus->pmu;
> - pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
> - rapl_hrtimer_init(pmu);
> + raw_spin_lock_init(&rapl_pmu->lock);
> + INIT_LIST_HEAD(&rapl_pmu->active_list);
> + rapl_pmu->pmu = &rapl_pmus->pmu;
> + rapl_pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
> + rapl_hrtimer_init(rapl_pmu);
>
> - rapl_pmus->pmus[rapl_pmu_idx] = pmu;
> + rapl_pmus->rapl_pmu[rapl_pmu_idx] = rapl_pmu;
> }
>
> /*
> @@ -617,7 +617,7 @@ static int rapl_cpu_online(unsigned int cpu)
> return 0;
>
> cpumask_set_cpu(cpu, &rapl_cpu_mask);
> - pmu->cpu = cpu;
> + rapl_pmu->cpu = cpu;
> return 0;
> }
>
> @@ -686,7 +686,7 @@ static void cleanup_rapl_pmus(void)
> int i;
>
> for (i = 0; i < rapl_pmus->nr_rapl_pmu; i++)
> - kfree(rapl_pmus->pmus[i]);
> + kfree(rapl_pmus->rapl_pmu[i]);
> kfree(rapl_pmus);
> }
>
> @@ -706,7 +706,7 @@ static int __init init_rapl_pmus(void)
> if (!rapl_pmu_is_pkg_scope())
> nr_rapl_pmu *= topology_max_dies_per_package();
>
> - rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL);
> + rapl_pmus = kzalloc(struct_size(rapl_pmus, rapl_pmu, nr_rapl_pmu), GFP_KERNEL);
> if (!rapl_pmus)
> return -ENOMEM;
>