[PATCH 05/12] perf/x86: Add config_mask to represent EVENTSEL bitmask

From: kan.liang
Date: Thu Apr 04 2024 - 10:08:18 EST


From: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>

Different vendors may support different fields in the EVENTSEL MSR. For
example, Intel introduces the new umask2 and eq fields in the EVENTSEL
MSR starting from Perfmon version 6. However, a fixed mask,
X86_RAW_EVENT_MASK, is currently used to filter the attr.config.

Introduce a new config_mask to record the actually supported EVENTSEL
bitmask.
For now, only apply it to the existing code. No functional change.
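For illustration only (this sketch is not part of the patch; the struct,
mask values, and get_event_config() name below are simplified stand-ins
for the kernel's per-PMU config_mask and x86_pmu_get_event_config()),
the change boils down to filtering attr.config with a per-PMU mask
instead of one fixed constant:

/*
 * Standalone sketch, not kernel code. Mask values are placeholders,
 * not the architectural EVENTSEL bit definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define BASE_EVENTSEL_BITS	0x000000000000ffffULL	/* placeholder for X86_RAW_EVENT_MASK */
#define EXTRA_EVENTSEL_BITS	0x0000000000ff0000ULL	/* placeholder for a new field, e.g. umask2 */

struct pmu_sketch {
	uint64_t config_mask;	/* EVENTSEL bits this PMU supports */
};

/* Mirrors the intent of x86_pmu_get_event_config(): keep only supported bits. */
static uint64_t get_event_config(const struct pmu_sketch *pmu, uint64_t attr_config)
{
	return attr_config & pmu->config_mask;
}

int main(void)
{
	struct pmu_sketch legacy = { .config_mask = BASE_EVENTSEL_BITS };
	struct pmu_sketch newer  = { .config_mask = BASE_EVENTSEL_BITS | EXTRA_EVENTSEL_BITS };
	uint64_t cfg = 0x0000000000aa00c0ULL;	/* arbitrary attr.config value */

	printf("legacy PMU keeps: %#llx\n", (unsigned long long)get_event_config(&legacy, cfg));
	printf("newer PMU keeps:  %#llx\n", (unsigned long long)get_event_config(&newer, cfg));
	return 0;
}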

Reviewed-by: Andi Kleen <ak@xxxxxxxxxxxxxxx>
Co-developed-by: Dapeng Mi <dapeng1.mi@xxxxxxxxxxxxxxx>
Signed-off-by: Dapeng Mi <dapeng1.mi@xxxxxxxxxxxxxxx>
Signed-off-by: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
---
 arch/x86/events/core.c       | 5 ++++-
 arch/x86/events/intel/core.c | 1 +
 arch/x86/events/perf_event.h | 7 +++++++
 3 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 08c31f48cead..f692e7fba740 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -624,7 +624,7 @@ int x86_pmu_hw_config(struct perf_event *event)
 		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
 
 	if (event->attr.type == event->pmu->type)
-		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
+		event->hw.config |= x86_pmu_get_event_config(event);
 
 	if (event->attr.sample_period && x86_pmu.limit_period) {
 		s64 left = event->attr.sample_period;
@@ -2097,6 +2097,9 @@ static int __init init_hw_perf_events(void)
 	if (!x86_pmu.intel_ctrl)
 		x86_pmu.intel_ctrl = x86_pmu.cntr_mask64;
 
+	if (!x86_pmu.config_mask)
+		x86_pmu.config_mask = X86_RAW_EVENT_MASK;
+
 	perf_events_lapic_init();
 	register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 16b4efdad03e..bbb98b4c2a84 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -6151,6 +6151,7 @@ static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus)
 		pmu->cntr_mask64 = x86_pmu.cntr_mask64;
 		pmu->fixed_cntr_mask64 = x86_pmu.fixed_cntr_mask64;
 		pmu->pebs_events_mask = intel_pmu_pebs_mask(pmu->cntr_mask64);
+		pmu->config_mask = X86_RAW_EVENT_MASK;
 		pmu->unconstrained = (struct event_constraint)
 				     __EVENT_CONSTRAINT(0, pmu->cntr_mask64,
 							0, x86_pmu_num_counters(&pmu->pmu), 0, 0);
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 3a65bfbb1173..4e448696bf96 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -695,6 +695,7 @@ struct x86_hybrid_pmu {
 	union perf_capabilities		intel_cap;
 	u64				intel_ctrl;
 	u64				pebs_events_mask;
+	u64				config_mask;
 	union {
 			u64		cntr_mask64;
 			unsigned long	cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -790,6 +791,7 @@ struct x86_pmu {
 	int		(*rdpmc_index)(int index);
 	u64		(*event_map)(int);
 	int		max_events;
+	u64		config_mask;
 	union {
 			u64		cntr_mask64;
 			unsigned long	cntr_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -1231,6 +1233,11 @@ static inline int x86_pmu_num_counters_fixed(struct pmu *pmu)
 	return hweight64(hybrid(pmu, fixed_cntr_mask64));
 }
 
+static inline u64 x86_pmu_get_event_config(struct perf_event *event)
+{
+	return event->attr.config & hybrid(event->pmu, config_mask);
+}
+
 extern struct event_constraint emptyconstraint;
 
 extern struct event_constraint unconstrained;
--
2.35.1