[tip: perf/core] perf/core: Merge struct pmu::pmu_disable_count into struct perf_cpu_pmu_context::pmu_disable_count

From: tip-bot2 for Peter Zijlstra
Date: Tue Mar 04 2025 - 04:03:09 EST


The following commit has been merged into the perf/core branch of tip:

Commit-ID: 4baeb0687abf5eca3f7ab8b147c27cce82ec49ea
Gitweb: https://git.kernel.org/tip/4baeb0687abf5eca3f7ab8b147c27cce82ec49ea
Author: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
AuthorDate: Mon, 04 Nov 2024 14:39:18 +01:00
Committer: Ingo Molnar <mingo@xxxxxxxxxx>
CommitterDate: Tue, 04 Mar 2025 09:42:47 +01:00

perf/core: Merge struct pmu::pmu_disable_count into struct perf_cpu_pmu_context::pmu_disable_count

It makes no sense to have two per-CPU allocations per PMU, so move the
pmu_disable_count counter into the already per-CPU struct perf_cpu_pmu_context.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Reviewed-by: Ravi Bangoria <ravi.bangoria@xxxxxxx>
Link: https://lore.kernel.org/r/20241104135518.518730578@xxxxxxxxxxxxx
---
 include/linux/perf_event.h |  2 +-
 kernel/events/core.c       | 12 ++++--------
 2 files changed, 5 insertions(+), 9 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8c0117b..5f293e6 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -343,7 +343,6 @@ struct pmu {
 	 */
 	unsigned int			scope;
 
-	int __percpu			*pmu_disable_count;
 	struct perf_cpu_pmu_context __percpu *cpu_pmu_context;
 	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
 	int				task_ctx_nr;
@@ -1031,6 +1030,7 @@ struct perf_cpu_pmu_context {
 
 	int				active_oncpu;
 	int				exclusive;
+	int				pmu_disable_count;
 
 	raw_spinlock_t			hrtimer_lock;
 	struct hrtimer			hrtimer;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 348a379..8321b71 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1219,21 +1219,22 @@ static int perf_mux_hrtimer_restart_ipi(void *arg)
 
 void perf_pmu_disable(struct pmu *pmu)
 {
-	int *count = this_cpu_ptr(pmu->pmu_disable_count);
+	int *count = &this_cpu_ptr(pmu->cpu_pmu_context)->pmu_disable_count;
 	if (!(*count)++)
 		pmu->pmu_disable(pmu);
 }
 
 void perf_pmu_enable(struct pmu *pmu)
 {
-	int *count = this_cpu_ptr(pmu->pmu_disable_count);
+	int *count = &this_cpu_ptr(pmu->cpu_pmu_context)->pmu_disable_count;
 	if (!--(*count))
 		pmu->pmu_enable(pmu);
 }
 
 static void perf_assert_pmu_disabled(struct pmu *pmu)
 {
-	WARN_ON_ONCE(*this_cpu_ptr(pmu->pmu_disable_count) == 0);
+	int *count = &this_cpu_ptr(pmu->cpu_pmu_context)->pmu_disable_count;
+	WARN_ON_ONCE(*count == 0);
 }
 
 static inline void perf_pmu_read(struct perf_event *event)
@@ -11906,7 +11907,6 @@ static bool idr_cmpxchg(struct idr *idr, unsigned long id, void *old, void *new)
 
 static void perf_pmu_free(struct pmu *pmu)
 {
-	free_percpu(pmu->pmu_disable_count);
 	if (pmu_bus_running && pmu->dev && pmu->dev != PMU_NULL_DEV) {
 		if (pmu->nr_addr_filters)
 			device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
@@ -11925,10 +11925,6 @@ int perf_pmu_register(struct pmu *_pmu, const char *name, int type)
 	struct pmu *pmu __free(pmu_unregister) = _pmu;
 	guard(mutex)(&pmus_lock);
 
-	pmu->pmu_disable_count = alloc_percpu(int);
-	if (!pmu->pmu_disable_count)
-		return -ENOMEM;
-
 	if (WARN_ONCE(!name, "Can not register anonymous pmu.\n"))
 		return -EINVAL;
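
For reference, the scheme the patch relocates (but does not change) is a
nesting disable count: perf_pmu_disable()/perf_pmu_enable() pairs nest per
CPU, and only the outermost pair actually invokes the pmu->pmu_disable() /
pmu->pmu_enable() callbacks. Below is a minimal, stand-alone user-space
sketch of that counting idea; the fake_* names and the printf "hardware"
hooks are illustrative stand-ins, not kernel code.

#include <stdio.h>

/* Stand-in for the per-CPU struct perf_cpu_pmu_context after this patch. */
struct fake_cpu_pmu_context {
	int pmu_disable_count;			/* nesting depth of disables */
};

struct fake_pmu {
	struct fake_cpu_pmu_context ctx;	/* one instance per CPU in the kernel */
};

static void hw_disable(struct fake_pmu *pmu) { printf("pmu->pmu_disable()\n"); }
static void hw_enable(struct fake_pmu *pmu)  { printf("pmu->pmu_enable()\n"); }

static void fake_pmu_disable(struct fake_pmu *pmu)
{
	int *count = &pmu->ctx.pmu_disable_count;

	if (!(*count)++)		/* 0 -> 1: outermost disable hits the hardware */
		hw_disable(pmu);
}

static void fake_pmu_enable(struct fake_pmu *pmu)
{
	int *count = &pmu->ctx.pmu_disable_count;

	if (!--(*count))		/* 1 -> 0: outermost enable re-enables it */
		hw_enable(pmu);
}

int main(void)
{
	struct fake_pmu pmu = { .ctx = { .pmu_disable_count = 0 } };

	fake_pmu_disable(&pmu);		/* prints pmu->pmu_disable() */
	fake_pmu_disable(&pmu);		/* nested: no callback */
	fake_pmu_enable(&pmu);		/* nested: no callback */
	fake_pmu_enable(&pmu);		/* prints pmu->pmu_enable() */
	return 0;
}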