Re: [PATCH v4 3/6] KVM: x86/pmu: Disable counters based on Host-Only/Guest-Only bits in SVM
From: Sean Christopherson
Date: Mon Apr 06 2026 - 21:30:23 EST
On Thu, Mar 26, 2026, Yosry Ahmed wrote:
> diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
> index ff5acb8b199b0..5961c002b28eb 100644
> --- a/arch/x86/include/asm/perf_event.h
> +++ b/arch/x86/include/asm/perf_event.h
> @@ -60,6 +60,8 @@
> #define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36)
> #define AMD64_EVENTSEL_GUESTONLY (1ULL << 40)
> #define AMD64_EVENTSEL_HOSTONLY (1ULL << 41)
> +#define AMD64_EVENTSEL_HOST_GUEST_MASK \
> +	(AMD64_EVENTSEL_HOSTONLY | AMD64_EVENTSEL_GUESTONLY)
>
> #define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT 37
> #define AMD64_EVENTSEL_INT_CORE_SEL_MASK \
> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
> index d6ac3c55fce55..e35d598f809a2 100644
> --- a/arch/x86/kvm/pmu.c
> +++ b/arch/x86/kvm/pmu.c
> @@ -559,6 +559,7 @@ static int reprogram_counter(struct kvm_pmc *pmc)
>
> 	if (kvm_vcpu_has_mediated_pmu(pmu_to_vcpu(pmu))) {
> 		kvm_mediated_pmu_refresh_event_filter(pmc);
> +		kvm_pmu_call(mediated_reprogram_counter)(pmc);

I would rather make a single call from kvm_pmu_handle_event(), and let the
vendor deal with mediated vs. legacy. I want to avoid mediated-specific ops as
much as possible, and I think kvm_pmu_ops.reprogram_counters() would be easier
to understand overall.

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index a7b38c104d06..7da0077ae24c 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -670,6 +670,8 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
 			set_bit(pmc->idx, pmu->reprogram_pmi);
 	}
 
+	kvm_pmu_call(reprogram_counters)(vcpu, bitmap);
+
 	/*
 	 * Release unused perf_events if the corresponding guest MSRs weren't
 	 * accessed during the last vCPU time slice (need_cleanup is set when
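
(For reference, the kvm_pmu_call() above implies a matching kvm_pmu_ops hook.
A minimal sketch, assuming the hook is optional and takes the vCPU plus the
reprogram bitmap; the exact declaration and wiring are guesses inferred from
the call site, not part of this series:)

	/* arch/x86/include/asm/kvm-x86-pmu-ops.h, sketch only */
	KVM_X86_PMU_OP_OPTIONAL(reprogram_counters)

	/* struct kvm_pmu_ops member, sketch only */
	void (*reprogram_counters)(struct kvm_vcpu *vcpu, unsigned long *bitmap);
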
> diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
> index 7aa298eeb0721..60931dfd624b2 100644
> --- a/arch/x86/kvm/svm/pmu.c
> +++ b/arch/x86/kvm/svm/pmu.c
> @@ -260,6 +260,34 @@ static void amd_mediated_pmu_put(struct kvm_vcpu *vcpu)
> 	wrmsrq(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, pmu->global_status);
> }
>
> +static void amd_mediated_pmu_handle_host_guest_bits(struct kvm_pmc *pmc)
> +{
> +	struct kvm_vcpu *vcpu = pmc->vcpu;
> +	u64 host_guest_bits;
> +
> +	if (!(pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE))
> +		return;
> +
> +	/* Count all events if both bits are cleared or both bits are set */
> +	host_guest_bits = pmc->eventsel & AMD64_EVENTSEL_HOST_GUEST_MASK;
> +	if (hweight64(host_guest_bits) != 1)
> +		return;
> +
> +	/* Host-Only and Guest-Only are ignored if EFER.SVME == 0 */
> +	if (!(vcpu->arch.efer & EFER_SVME))
> +		return;
> +
> +	if (!!(host_guest_bits & AMD64_EVENTSEL_GUESTONLY) == is_guest_mode(vcpu))
> +		return;
> +
> +	pmc->eventsel_hw &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
> +}
> +
> +static void amd_mediated_pmu_reprogram_counter(struct kvm_pmc *pmc)
> +{
> +	amd_mediated_pmu_handle_host_guest_bits(pmc);

And then this doesn't need to be such a wonky wrapper, and the "reprogram on
nested transition" logic can also clear the entire bitmap instead of doing
things piecemeal, e.g. it can be something like so in the end:

	if (!kvm_vcpu_has_mediated_pmu(vcpu))
		return;

	bitmap_zero(pmu->pmc_reprogram_on_nested_transition, X86_PMC_IDX_MAX);

	kvm_for_each_pmc(pmu, pmc, bit, bitmap)
		amd_mediated_pmu_handle_host_guest_bits(pmc);
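
(Fleshed out into a full callback, that could look roughly like the below.
The function name and the vcpu_to_pmu() plumbing are illustrative assumptions
built around the loop above, not taken from the series:)

/* Sketch only: the callback name and locals are illustrative. */
static void amd_pmu_reprogram_counters(struct kvm_vcpu *vcpu,
				       unsigned long *bitmap)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	int bit;

	if (!kvm_vcpu_has_mediated_pmu(vcpu))
		return;

	/*
	 * Clear the nested-transition bookkeeping in one go rather than
	 * piecemeal; the per-counter handling below re-evaluates the
	 * Host-Only/Guest-Only bits for every counter being reprogrammed.
	 */
	bitmap_zero(pmu->pmc_reprogram_on_nested_transition, X86_PMC_IDX_MAX);

	kvm_for_each_pmc(pmu, pmc, bit, bitmap)
		amd_mediated_pmu_handle_host_guest_bits(pmc);
}

(The hook would then presumably be wired up via a .reprogram_counters entry in
amd_pmu_ops.)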