[PATCH v1 1/2] perf/x86: Avoid inadvertent casts to x86_hybrid_pmu

From: Ian Rogers

Date: Thu Mar 12 2026 - 01:48:30 EST


The patch:
https://lore.kernel.org/lkml/20260311075201.2951073-2-dapeng1.mi@xxxxxxxxxxxxxxx/
showed it was pretty easy to accidentally cast non-x86 PMUs to
x86_hybrid_pmus. Add a BUG_ON for that case. Restructure is_x86_event
and add an is_x86_pmu to facilitate this.

Signed-off-by: Ian Rogers <irogers@xxxxxxxxxx>
---
Only build tested.
---
arch/x86/events/core.c | 16 ----------------
arch/x86/events/perf_event.h | 19 ++++++++++++++++++-
2 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 03ce1bc7ef2e..6c6567dc6c88 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -774,22 +774,6 @@ void x86_pmu_enable_all(int added)
}
}

-int is_x86_event(struct perf_event *event)
-{
- /*
- * For a non-hybrid platforms, the type of X86 pmu is
- * always PERF_TYPE_RAW.
- * For a hybrid platform, the PERF_PMU_CAP_EXTENDED_HW_TYPE
- * is a unique capability for the X86 PMU.
- * Use them to detect a X86 event.
- */
- if (event->pmu->type == PERF_TYPE_RAW ||
- event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE)
- return true;
-
- return false;
-}
-
struct pmu *x86_get_pmu(unsigned int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index fad87d3c8b2c..f1123c95d174 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -115,7 +115,23 @@ static inline bool is_topdown_event(struct perf_event *event)
return is_metric_event(event) || is_slots_event(event);
}

-int is_x86_event(struct perf_event *event);
+static inline bool is_x86_pmu(struct pmu *pmu)
+{
+ /*
+ * For non-hybrid platforms, the type of the x86 PMU is
+ * always PERF_TYPE_RAW.
+ * For a hybrid platform, the PERF_PMU_CAP_EXTENDED_HW_TYPE
+ * is a unique capability of the x86 PMU.
+ * Use them to detect an x86 PMU.
+ */
+ return pmu->type == PERF_TYPE_RAW ||
+ (pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE);
+}
+
+static inline bool is_x86_event(struct perf_event *event)
+{
+ return is_x86_pmu(event->pmu);
+}

static inline bool check_leader_group(struct perf_event *leader, int flags)
{
@@ -779,6 +795,7 @@ struct x86_hybrid_pmu {

static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
{
+ BUG_ON(!is_x86_pmu(pmu));
return container_of(pmu, struct x86_hybrid_pmu, pmu);
}

--
2.53.0.851.ga537e3e6e9-goog