[kvm-unit-tests PATCH 2/2] x86/pmu: Add AMD Guest PerfMonV2 testcases

From: Like Xu
Date: Mon Sep 05 2022 - 08:44:03 EST


From: Like Xu <likexu@xxxxxxxxxxx>

Update the test cases to cover KVM's enabling code for AMD Guest PerfMonV2.

Checks for the AMD CPUID leaves are added to the previously Intel-specific
PMU helpers, and MSRs with matching semantics are assigned during the
initialization phase, so the vast majority of the PMU test cases are
reused unchanged.
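
For reference, the detection hinges on CPUID leaf 0x80000022 ("AMD
Extended Performance Monitoring and Debug"): EAX bit 0 advertises
PerfMonV2 and EBX[3:0] reports the number of core performance counters.
A minimal, self-contained sketch of that decode follows; raw_cpuid() and
the other names here are illustrative, not the library's actual helpers:

#include <stdbool.h>
#include <stdint.h>

struct cpuid_regs { uint32_t eax, ebx, ecx, edx; };

/* Hypothetical wrapper around the CPUID instruction. */
static struct cpuid_regs raw_cpuid(uint32_t leaf)
{
	struct cpuid_regs r;

	asm volatile("cpuid"
		     : "=a"(r.eax), "=b"(r.ebx), "=c"(r.ecx), "=d"(r.edx)
		     : "a"(leaf), "c"(0));
	return r;
}

static bool amd_has_perfmon_v2(void)
{
	/* Callers should first verify the leaf exists via CPUID 0x80000000. */
	return raw_cpuid(0x80000022).eax & 0x1;
}

static uint8_t amd_nr_core_counters(void)
{
	return raw_cpuid(0x80000022).ebx & 0xf;
}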

On some x86 machines (AMD only), measuring the same workload repeatedly
yields an erratic number of events, even for retired events. This
reflects details of the hardware implementation; from a software
perspective the event type is imprecise, which calls for a tolerance
check in the counter overflow test cases.
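
In code terms the policy is: measure the workload several times up
front, and only if every run returns an identical count treat the event
as precise and demand an exact overflow result. A condensed sketch of
that check (report() is the harness's existing assertion helper;
report_overflow_count() is an illustrative name, not the test's API):

/* Exact check for precise events, a small tolerance otherwise. */
static void report_overflow_count(uint64_t count, int i, bool precise)
{
	if (precise)
		/* the counter was programmed to overflow after one event */
		report(count == 1, "cntr-%d", i);
	else
		/* imprecise event: repeated runs may differ slightly */
		report(count < 4, "cntr-%d", i);
}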

Signed-off-by: Like Xu <likexu@xxxxxxxxxxx>
---
lib/x86/msr.h | 5 ++++
lib/x86/processor.h | 9 ++++++-
x86/pmu.c | 61 ++++++++++++++++++++++++++++++++-------------
3 files changed, 56 insertions(+), 19 deletions(-)

diff --git a/lib/x86/msr.h b/lib/x86/msr.h
index 5f16a58..6f31155 100644
--- a/lib/x86/msr.h
+++ b/lib/x86/msr.h
@@ -419,6 +419,11 @@
#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390

+/* AMD Performance Counter Global Status and Control MSRs */
+#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS 0xc0000300
+#define MSR_AMD64_PERF_CNTR_GLOBAL_CTL 0xc0000301
+#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR 0xc0000302
+
/* Geode defined MSRs */
#define MSR_GEODE_BUSCONT_CONF0 0x00001900

diff --git a/lib/x86/processor.h b/lib/x86/processor.h
index 9c490d9..b9592c4 100644
--- a/lib/x86/processor.h
+++ b/lib/x86/processor.h
@@ -796,8 +796,12 @@ static inline void flush_tlb(void)

static inline u8 pmu_version(void)
{
- if (!is_intel())
+ if (!is_intel()) {
+ /* Performance Monitoring Version 2 Supported */
+ if (cpuid(0x80000022).a & 0x1)
+ return 2;
return 0;
+ }

return cpuid(10).a & 0xff;
}
@@ -824,6 +828,9 @@ static inline u8 pmu_nr_gp_counters(void)
{
if (is_intel()) {
return (cpuid(10).a >> 8) & 0xff;
+ } else if (this_cpu_has_perf_global_ctrl()) {
+ /* Number of Core Performance Counters. */
+ return cpuid(0x80000022).b & 0xf;
} else if (!has_amd_perfctr_core()) {
return AMD64_NUM_COUNTERS;
}
diff --git a/x86/pmu.c b/x86/pmu.c
index 11607c0..6d5363b 100644
--- a/x86/pmu.c
+++ b/x86/pmu.c
@@ -72,6 +72,9 @@ struct pmu_event {
#define PMU_CAP_FW_WRITES (1ULL << 13)
static u32 gp_counter_base;
static u32 gp_select_base;
+static u32 global_status_msr;
+static u32 global_ctl_msr;
+static u32 global_status_clr_msr;
static unsigned int gp_events_size;
static unsigned int nr_gp_counters;

@@ -150,8 +153,7 @@ static void global_enable(pmu_counter_t *cnt)
return;

cnt->idx = event_to_global_idx(cnt);
- wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) |
- (1ull << cnt->idx));
+ wrmsr(global_ctl_msr, rdmsr(global_ctl_msr) | (1ull << cnt->idx));
}

static void global_disable(pmu_counter_t *cnt)
@@ -159,8 +161,7 @@ static void global_disable(pmu_counter_t *cnt)
if (pmu_version() < 2)
return;

- wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) &
- ~(1ull << cnt->idx));
+ wrmsr(global_ctl_msr, rdmsr(global_ctl_msr) & ~(1ull << cnt->idx));
}

static inline uint32_t get_gp_counter_msr(unsigned int i)
@@ -326,6 +327,23 @@ static void check_counters_many(void)
report(i == n, "all counters");
}

+static bool is_the_count_reproducible(pmu_counter_t *cnt)
+{
+ unsigned int i;
+ uint64_t count;
+
+ __measure(cnt, 0);
+ count = cnt->count;
+
+ for (i = 0; i < 10; i++) {
+ __measure(cnt, 0);
+ if (count != cnt->count)
+ return false;
+ }
+
+ return true;
+}
+
static void check_counter_overflow(void)
{
uint64_t count;
@@ -334,13 +352,14 @@ static void check_counter_overflow(void)
.ctr = gp_counter_base,
.config = EVNTSEL_OS | EVNTSEL_USR | (*gp_events)[1].unit_sel /* instructions */,
};
+ bool precise_event = is_the_count_reproducible(&cnt);
+
__measure(&cnt, 0);
count = cnt.count;

/* clear status before test */
if (pmu_version() > 1) {
- wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL,
- rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));
+ wrmsr(global_status_clr_msr, rdmsr(global_status_msr));
}

report_prefix_push("overflow");
@@ -373,7 +392,7 @@ static void check_counter_overflow(void)
__measure(&cnt, cnt.count);

report(check_irq() == (i % 2), "irq-%d", i);
- if (pmu_version() > 1)
+ if (precise_event)
report(cnt.count == 1, "cntr-%d", i);
else
report(cnt.count < 4, "cntr-%d", i);
@@ -381,10 +400,10 @@ static void check_counter_overflow(void)
if (pmu_version() < 2)
continue;

- status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
+ status = rdmsr(global_status_msr);
report(status & (1ull << idx), "status-%d", i);
- wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, status);
- status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
+ wrmsr(global_status_clr_msr, status);
+ status = rdmsr(global_status_msr);
report(!(status & (1ull << idx)), "status clear-%d", i);
}

@@ -492,8 +511,7 @@ static void check_running_counter_wrmsr(void)

/* clear status before overflow test */
if (pmu_version() > 1) {
- wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL,
- rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));
+ wrmsr(global_status_clr_msr, rdmsr(global_status_msr));
}

start_event(&evt);
@@ -508,7 +526,7 @@ static void check_running_counter_wrmsr(void)
stop_event(&evt);

if (pmu_version() > 1) {
- status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
+ status = rdmsr(global_status_msr);
report(status & 1, "status");
}

@@ -532,8 +550,7 @@ static void check_emulated_instr(void)
report_prefix_push("emulated instruction");

if (pmu_version() > 1) {
- wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL,
- rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));
+ wrmsr(global_status_clr_msr, rdmsr(global_status_msr));
}

start_event(&brnch_cnt);
@@ -576,7 +593,7 @@ static void check_emulated_instr(void)
: "eax", "ebx", "ecx", "edx");

if (pmu_version() > 1)
- wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ wrmsr(global_ctl_msr, 0);

stop_event(&brnch_cnt);
stop_event(&instr_cnt);
@@ -590,7 +607,7 @@ static void check_emulated_instr(void)

if (pmu_version() > 1) {
// Additionally check that those counters overflowed properly.
- status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
+ status = rdmsr(global_status_msr);
report(status & 1, "instruction counter overflow");
report(status & 2, "branch counter overflow");
}
@@ -679,7 +696,7 @@ static void set_ref_cycle_expectations(void)
if (!nr_gp_counters || !pmu_gp_counter_is_available(2))
return;

- wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ wrmsr(global_ctl_msr, 0);

t0 = fenced_rdtsc();
start_event(&cnt);
@@ -722,6 +739,10 @@ static bool detect_intel_pmu(void)
gp_counter_base = MSR_IA32_PERFCTR0;
gp_select_base = MSR_P6_EVNTSEL0;

+ global_status_msr = MSR_CORE_PERF_GLOBAL_STATUS;
+ global_ctl_msr = MSR_CORE_PERF_GLOBAL_CTRL;
+ global_status_clr_msr = MSR_CORE_PERF_GLOBAL_OVF_CTRL;
+
report_prefix_push("Intel");
return true;
}
@@ -746,6 +767,10 @@ static bool detect_amd_pmu(void)
gp_counter_base = MSR_F15H_PERF_CTR0;
gp_select_base = MSR_F15H_PERF_CTL0;

+ global_status_msr = MSR_AMD64_PERF_CNTR_GLOBAL_STATUS;
+ global_ctl_msr = MSR_AMD64_PERF_CNTR_GLOBAL_CTL;
+ global_status_clr_msr = MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR;
+
report_prefix_push("AMD");
return true;
}
--
2.37.3