[PATCH 5/5] KVM: x86/pmu: Hide guest counter updates from the VMRUN instruction

From: Like Xu
Date: Fri Mar 10 2023 - 05:55:06 EST


From: Like Xu <likexu@xxxxxxxxxxx>

When an AMD guest is counting (branch) instruction events, its vPMU should
first subtract one from any relevant counter enabled for (branch) instructions
(at the point immediately preceding VMRUN, where it cannot be preempted) to
offset the inevitable plus-one effect of the VMRUN instruction that
immediately follows.
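
To illustrate the arithmetic outside of KVM, here is a minimal standalone
sketch; it is not taken from this patch, and the counter width and helper
name are made up for the illustration only:

  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative counter width; AMD core PMCs are typically 48 bits. */
  #define PMC_WIDTH 48
  #define PMC_MASK ((1ULL << PMC_WIDTH) - 1)

  /* Pre-VMRUN compensation: cancel out the +1 that VMRUN will add. */
  static uint64_t hide_vmrun(uint64_t counter)
  {
          return (counter - 1) & PMC_MASK;
  }

  int main(void)
  {
          uint64_t counter = 5;

          counter = hide_vmrun(counter);      /* 4: compensated value */
          counter = (counter + 1) & PMC_MASK; /* 5: after VMRUN's +1 */
          printf("counter = %llu\n", (unsigned long long)counter);
          return 0;
  }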

Based on a number of micro-observations (which are also the reason why
x86_64/pmu_event_filter_test fails on AMD Zen platforms), each VMRUN
increments all hardware (branch) instruction counters by 1, even if they are
enabled only for guest code. This seriously distorts the performance picture
that guest developers build from (branch) instruction events.

If the current value of the physical counter register is ~0x0 (all ones), the
extra increment triggers an overflow in the guest right after VMRUN runs.
Although this cannot be avoided on mainstream released hardware, the resulting
PMI (if configured) will not be incorrectly injected into the guest by the
vPMU, since the delayed injection mechanism for a normal counter overflow
depends only on a change in pmc->counter values.
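
For reference, that deferred-overflow check amounts to comparing the counter
value against a snapshot taken at the previous update; the sketch below is a
simplified standalone illustration, with the struct and helper stripped down
rather than the actual KVM definitions:

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative only: the real struct kvm_pmc has many more fields. */
  struct pmc {
          uint64_t counter;
          uint64_t prev_counter; /* snapshot from the previous update */
  };

  /* An emulated overflow is reported only when the value wrapped. */
  static bool pmc_overflowed(const struct pmc *pmc)
  {
          return pmc->counter < pmc->prev_counter;
  }

  int main(void)
  {
          struct pmc pmc = { .counter = 0, .prev_counter = UINT64_MAX };

          printf("overflowed: %d\n", pmc_overflowed(&pmc)); /* prints 1 */
          return 0;
  }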

pmu_hide_vmrun() is called before each VMRUN; its overhead depends on the
number of counters enabled by the guest and is negligible when no counters
are in use.

Cc: Ravi Bangoria <ravi.bangoria@xxxxxxx>
Signed-off-by: Like Xu <likexu@xxxxxxxxxxx>
---
arch/x86/include/asm/kvm_host.h | 4 ++++
arch/x86/kvm/pmu.c | 18 ++++++++++++++++++
arch/x86/kvm/pmu.h | 24 +++++++++++++++++++++++-
arch/x86/kvm/svm/pmu.c | 1 +
arch/x86/kvm/svm/svm.c | 28 ++++++++++++++++++++++++++++
5 files changed, 74 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index adb92fc4d7c9..d6fcbf233cb3 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -561,6 +561,10 @@ struct kvm_pmu {
*/
u64 host_cross_mapped_mask;

+ /* Flags to track any HW quirks that need to be fixed by vPMU. */
+ u64 quirk_flags;
+ DECLARE_BITMAP(hide_vmrun_pmc_idx, X86_PMC_IDX_MAX);
+
/*
* The gate to release perf_events not marked in
* pmc_in_use only once in a vcpu time slice.
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 2a0504732966..315dca021d57 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -254,6 +254,7 @@ static void pmc_pause_counter(struct kvm_pmc *pmc)
counter += perf_event_pause(pmc->perf_event, true);
pmc->counter = counter & pmc_bitmask(pmc);
pmc->is_paused = true;
+ kvm_mark_pmc_is_quirky(pmc);
}

static bool pmc_resume_counter(struct kvm_pmc *pmc)
@@ -822,6 +823,19 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
return r;
}

+static inline bool event_is_branch_instruction(struct kvm_pmc *pmc)
+{
+ return eventsel_match_perf_hw_id(pmc, PERF_COUNT_HW_INSTRUCTIONS) ||
+ eventsel_match_perf_hw_id(pmc,
+ PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+}
+
+static inline bool quirky_pmc_will_count_vmrun(struct kvm_pmc *pmc)
+{
+ return event_is_branch_instruction(pmc) && event_is_allowed(pmc) &&
+ !static_call(kvm_x86_get_cpl)(pmc->vcpu);
+}
+
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -837,6 +851,10 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)

reprogram_counter(pmc);
kvm_pmu_handle_pmc_overflow(pmc);
+
+ if (vcpu_has_pmu_quirks(vcpu) &&
+ quirky_pmc_will_count_vmrun(pmc))
+ set_bit(pmc->idx, pmu->hide_vmrun_pmc_idx);
}

/*
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index a47b579667c6..30f6f58f4c38 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -18,6 +18,9 @@
#define VMWARE_BACKDOOR_PMC_REAL_TIME 0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME 0x10002

+#define X86_PMU_COUNT_VMRUN BIT_ULL(0)
+#define X86_PMU_QUIRKS_MASK X86_PMU_COUNT_VMRUN
+
struct kvm_pmu_ops {
bool (*hw_event_available)(struct kvm_pmc *pmc);
bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
@@ -54,14 +57,33 @@ static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

+static inline bool vcpu_has_pmu_quirks(struct kvm_vcpu *vcpu)
+{
+ return vcpu_to_pmu(vcpu)->quirk_flags & X86_PMU_QUIRKS_MASK;
+}
+
+/*
+ * The time to mark the pmc is when an accumulated value returned by the
+ * perf API for the underlying HW counter has just taken effect.
+ */
+static inline void kvm_mark_pmc_is_quirky(struct kvm_pmc *pmc)
+{
+ if (!vcpu_has_pmu_quirks(pmc->vcpu))
+ return;
+
+ kvm_pmu_request_counter_reprogram(pmc);
+}
+
static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
u64 counter, enabled, running;

counter = pmc->counter;
- if (pmc->perf_event && !pmc->is_paused)
+ if (pmc->perf_event && !pmc->is_paused) {
counter += perf_event_read_value(pmc->perf_event,
&enabled, &running);
+ kvm_mark_pmc_is_quirky(pmc);
+ }
/* FIXME: Scaling needed? */
return counter & pmc_bitmask(pmc);
}
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 5fa939e411d8..130991a97f22 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -187,6 +187,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->nr_arch_fixed_counters = 0;
pmu->global_status = 0;
bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
+ pmu->quirk_flags |= X86_PMU_COUNT_VMRUN;
}

static void amd_pmu_init(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f41d96e638ef..f6b33d172481 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3919,6 +3919,31 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
return EXIT_FASTPATH_NONE;
}

+static void pmu_hide_vmrun(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ struct kvm_pmc *pmc;
+ unsigned int i;
+
+ for_each_set_bit(i, pmu->hide_vmrun_pmc_idx, X86_PMC_IDX_MAX) {
+ clear_bit(i, pmu->hide_vmrun_pmc_idx);
+
+ /* AMD doesn't have fixed counters for now. */
+ if (i >= pmu->nr_arch_gp_counters)
+ continue;
+
+ /*
+ * The prerequisite for fixing this HW quirk is that the counter is indeed
+ * backed by hardware and perf has had no chance to retrieve its value.
+ */
+ pmc = &pmu->gp_counters[i];
+ if (!pmc->perf_event || pmc->perf_event->hw.idx < 0)
+ continue;
+
+ pmc->counter--;
+ }
+}
+
static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3986,6 +4011,9 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)

kvm_wait_lapic_expire(vcpu);

+ if (vcpu->kvm->arch.enable_pmu && vcpu_has_pmu_quirks(vcpu))
+ pmu_hide_vmrun(vcpu);
+
/*
* If this vCPU has touched SPEC_CTRL, restore the guest's value if
* it's non-zero. Since vmentry is serialising on affected CPUs, there
--
2.39.2