On Fri, Jan 26, 2024, Xiong Zhang wrote:
static void intel_save_pmu_context(struct kvm_vcpu *vcpu)
For the next RFC, please make sure that it includes AMD support. Mostly because I'm
{
+ /*
+ * Save the guest's PMU register state (global status, GP counters and
+ * event selectors, fixed counters and their control) from the hardware
+ * MSRs into the vCPU's kvm_pmu, then zero the hardware copies so no
+ * guest state leaks to the host and no guest counter keeps counting
+ * while the host runs.
+ */
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ struct kvm_pmc *pmc;
+ u32 i;
+
+ /* Only the Arch PerfMon v2 MSR layout is handled below. */
+ if (pmu->version != 2) {
+ pr_warn("only PerfMon v2 is supported for passthrough PMU");
+ return;
+ }
+
+ /* Global ctrl register is already saved at VM-exit. */
+ rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, pmu->global_status);
+ /*
+ * Clear hardware MSR_CORE_PERF_GLOBAL_STATUS MSR, if non-zero: writing
+ * the saved value to GLOBAL_OVF_CTRL clears exactly the status bits
+ * that were found set.
+ */
+ if (pmu->global_status)
+ wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, pmu->global_status);
+
+ /* Save, then clear, each GP counter and its event selector. */
+ for (i = 0; i < pmu->nr_arch_gp_counters; i++) {
+ pmc = &pmu->gp_counters[i];
+ rdpmcl(i, pmc->counter);
+ rdmsrl(i + MSR_ARCH_PERFMON_EVENTSEL0, pmc->eventsel);
+ /*
+ * Clear hardware PERFMON_EVENTSELx and its counter to avoid
+ * leakage and also avoid this guest GP counter getting accidentally
+ * enabled during host running when the host enables global ctrl.
+ */
+ if (pmc->eventsel)
+ wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0);
+ /*
+ * NOTE(review): MSR_IA32_PMC0 is the full-width counter alias;
+ * presumably full-width writes (PERF_CAPABILITIES.FW_WRITE) are
+ * guaranteed on the parts targeted here -- confirm.
+ */
+ if (pmc->counter)
+ wrmsrl(MSR_IA32_PMC0 + i, 0);
+ }
+
+ /* Save the fixed-counter control, then clear the hardware copy. */
+ rdmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, pmu->fixed_ctr_ctrl);
+ /*
+ * Clear hardware FIXED_CTR_CTRL MSR to avoid information leakage and
+ * also avoid these guest fixed counters getting accidentally enabled
+ * during host running when the host enables global ctrl.
+ */
+ if (pmu->fixed_ctr_ctrl)
+ wrmsrl(MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+ /* Save and clear each fixed counter (read via the fixed RDPMC index). */
+ for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
+ pmc = &pmu->fixed_counters[i];
+ rdpmcl(INTEL_PMC_FIXED_RDPMC_BASE | i, pmc->counter);
+ if (pmc->counter)
+ wrmsrl(MSR_CORE_PERF_FIXED_CTR0 + i, 0);
+ }
pretty sure all of this code can be in common x86. The fixed counters are ugly,
but pmu->nr_arch_fixed_counters is guaranteed to be '0' on AMD, so it's _just_ ugly,
i.e. not functionally problematic.