[PATCH 1/2] perf/x86/rapl: Move the pmu allocation out of CPU hotplug

From: kan . liang
Date: Fri Sep 13 2024 - 13:10:15 EST


From: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>

The rapl pmu only needs to be allocated once. It doesn't matter whether it
is allocated at each CPU hotplug or in the global init_rapl_pmus().

Move the pmu allocation to init_rapl_pmus() so that the generic hotplug
support can be applied.

Tested-by: Oliver Sang <oliver.sang@xxxxxxxxx>
Signed-off-by: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
Cc: Dhananjay Ugwekar <Dhananjay.Ugwekar@xxxxxxx>
---

Rebase on top of perf/core branch
commit bac2a553dbf2 ("perf/x86/intel: Add PMU support for ArrowLake-H")
Close the UBSAN issue.
https://lore.kernel.org/oe-lkp/202409111521.c7c6d56f-lkp@xxxxxxxxx/

arch/x86/events/rapl.c | 50 +++++++++++++++++++++++++++++++-----------
1 file changed, 37 insertions(+), 13 deletions(-)

diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index a481a939862e..a6f31978a5b4 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -602,19 +602,8 @@ static int rapl_cpu_online(unsigned int cpu)
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
int target;

- if (!pmu) {
- pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
- if (!pmu)
- return -ENOMEM;
-
- raw_spin_lock_init(&pmu->lock);
- INIT_LIST_HEAD(&pmu->active_list);
- pmu->pmu = &rapl_pmus->pmu;
- pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
- rapl_hrtimer_init(pmu);
-
- rapl_pmus->pmus[rapl_pmu_idx] = pmu;
- }
+ if (!pmu)
+ return -ENOMEM;

/*
* Check if there is an online cpu in the package which collects rapl
@@ -707,6 +696,38 @@ static const struct attribute_group *rapl_attr_update[] = {
NULL,
};

+static void __init init_rapl_pmu(void)
+{
+ struct rapl_pmu *pmu;
+ s32 rapl_pmu_idx;
+ int cpu;
+
+ cpus_read_lock();
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ pmu = cpu_to_rapl_pmu(cpu);
+ if (pmu)
+ continue;
+ rapl_pmu_idx = get_rapl_pmu_idx(cpu);
+ if (WARN_ON_ONCE(rapl_pmu_idx < 0))
+ continue;
+
+ pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+ if (!pmu)
+ continue;
+
+ raw_spin_lock_init(&pmu->lock);
+ INIT_LIST_HEAD(&pmu->active_list);
+ pmu->pmu = &rapl_pmus->pmu;
+ pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+ rapl_hrtimer_init(pmu);
+
+ rapl_pmus->pmus[rapl_pmu_idx] = pmu;
+ }
+
+ cpus_read_unlock();
+}
+
static int __init init_rapl_pmus(void)
{
int nr_rapl_pmu = topology_max_packages();
@@ -730,6 +751,9 @@ static int __init init_rapl_pmus(void)
rapl_pmus->pmu.read = rapl_pmu_event_read;
rapl_pmus->pmu.module = THIS_MODULE;
rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
+
+ init_rapl_pmu();
+
return 0;
}

--
2.38.1