[PATCH v3 07/11] KVM: selftests: Test Intel supported fixed counters bit mask
From: Jinrong Liang
Date: Mon Aug 14 2023 - 07:52:39 EST
From: Jinrong Liang <cloudliang@xxxxxxxxxxx>
Add a test to verify that fixed counters enumerated via the guest's
CPUID.0xA.ECX bitmask, rather than via the legacy count in EDX[4:0],
work as expected.
Co-developed-by: Like Xu <likexu@xxxxxxxxxxx>
Signed-off-by: Like Xu <likexu@xxxxxxxxxxx>
Signed-off-by: Jinrong Liang <cloudliang@xxxxxxxxxxx>
---
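For reference, the fixed counter enumeration the guest relies on is the
union of the two CPUID.0xA fields, roughly as in the sketch below (purely
illustrative; the cpuid() helper and variable names are stand-ins, not
APIs added by this patch):

	uint32_t eax, ebx, ecx, edx;

	cpuid(0xa, &eax, &ebx, &ecx, &edx);

	/* Fixed counter 'i' is supported if ECX[i] is set or i < EDX[4:0]. */
	bool supported = (ecx & BIT(i)) || i < (edx & 0x1f);
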
.../kvm/x86_64/pmu_basic_functionality_test.c | 60 +++++++++++++++++++
1 file changed, 60 insertions(+)
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
index db1c1230700a..3bbf3bd2846b 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_basic_functionality_test.c
@@ -282,6 +282,65 @@ static void intel_test_counters_num(void)
}
}
+static void intel_guest_run_fixed_counters(void)
+{
+	uint64_t supported_bitmask = this_cpu_property(X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK);
+	uint32_t nr_fixed_counter = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+	uint64_t msr_val;
+	unsigned int i;
+	bool expected;
+
+	/*
+	 * A fixed counter is usable if it is enumerated via the legacy count
+	 * in EDX[4:0] or via its bit in the ECX bitmask.  Exercise every
+	 * enumerated counter, including those exposed only through the
+	 * bitmask, and verify that each one actually counts.
+	 */
+	for (i = 0; i < nr_fixed_counter || supported_bitmask >> i; i++) {
+		expected = (supported_bitmask & BIT_ULL(i)) || i < nr_fixed_counter;
+		if (!expected)
+			continue;
+
+		wrmsr_safe(MSR_CORE_PERF_FIXED_CTR0 + i, 0);
+		wrmsr_safe(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * i));
+		wrmsr_safe(MSR_CORE_PERF_GLOBAL_CTRL, BIT_ULL(INTEL_PMC_IDX_FIXED + i));
+		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+		wrmsr_safe(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+		rdmsr_safe(MSR_CORE_PERF_FIXED_CTR0 + i, &msr_val);
+
+		GUEST_ASSERT(msr_val);
+	}
+
+	GUEST_DONE();
+}
+
+static void test_fixed_counters_setup(struct kvm_vcpu *vcpu,
+				      uint32_t fixed_bitmask,
+				      uint8_t edx_fixed_num)
+{
+	int ret;
+
+	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK,
+				fixed_bitmask);
+	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_NR_FIXED_COUNTERS,
+				edx_fixed_num);
+
+	do {
+		ret = run_vcpu(vcpu, NULL);
+	} while (ret != UCALL_DONE);
+}
+
+static void intel_test_fixed_counters(void)
+{
+	uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+	uint32_t ecx;
+	uint8_t edx;
+
+	/*
+	 * Sweep all combinations of the legacy fixed counter count (EDX[4:0])
+	 * and the supported fixed counters bitmask (ECX).  KVM doesn't
+	 * emulate more fixed counters than it supports, so cap both sweeps
+	 * at the count KVM reports.
+	 */
+	for (edx = 0; edx <= nr_fixed_counters; edx++) {
+		for (ecx = 0; ecx < BIT_ULL(nr_fixed_counters); ecx++) {
+			vm = pmu_vm_create_with_one_vcpu(&vcpu,
+							 intel_guest_run_fixed_counters);
+			test_fixed_counters_setup(vcpu, ecx, edx);
+			kvm_vm_free(vm);
+		}
+	}
+}
+
int main(int argc, char *argv[])
{
TEST_REQUIRE(get_kvm_param_bool("enable_pmu"));
@@ -293,6 +352,7 @@ int main(int argc, char *argv[])
intel_test_arch_events();
intel_test_counters_num();
+ intel_test_fixed_counters();
return 0;
}
--
2.39.3