[RFC v1 4/9] KVM: x86: Implement counter reload MSRs read/write emulation

From: Luwei Kang
Date: Thu Aug 29 2019 - 01:39:19 EST


This patch implements read/write emulation for the counter reload
registers MSR_RELOAD_PMCx/MSR_RELOAD_FIXED_CTRx. These registers can
be accessed only when PEBS is supported in KVM.
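
For illustration only (not part of this patch): each counter's reload
MSR sits at a fixed offset from the block base, so the lookup mirrors
the existing get_gp_pmc()/get_fixed_pmc() base+offset helpers. A
minimal sketch, with reload_msr_to_pmc() as a hypothetical name:

static struct kvm_pmc *reload_msr_to_pmc(struct kvm_pmu *pmu, u32 msr)
{
	/* GP counter i is reached via MSR_IA32_RELOAD_PMC0 + i */
	if (msr >= MSR_IA32_RELOAD_PMC0 &&
	    msr < MSR_IA32_RELOAD_PMC0 + pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[msr - MSR_IA32_RELOAD_PMC0];

	/* fixed counter i via MSR_IA32_RELOAD_FIXED_CTR0 + i */
	if (msr >= MSR_IA32_RELOAD_FIXED_CTR0 &&
	    msr < MSR_IA32_RELOAD_FIXED_CTR0 + pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[msr - MSR_IA32_RELOAD_FIXED_CTR0];

	return NULL;	/* not a reload MSR of any active counter */
}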

The VMM needs to reprogram the counters after the configuration has
changed, so that the host PMU framework loads the new value into the
real hardware.
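
As a guest-side usage sketch (an assumption-laden example, not code
from this series: it presumes PEBS is exposed to the guest, 48-bit
counters, wrmsrl() from <asm/msr.h>, and a made-up PEBS_PERIOD), both
writes below trap into intel_pmu_set_msr(), which stores the value and
reprograms the counter:

#define PEBS_PERIOD	100000ULL		/* made-up sampling period */
#define COUNTER_MASK	((1ULL << 48) - 1)	/* assumed 48-bit counter width */

static void guest_arm_pebs_gp0(void)
{
	/* Reload value: hardware re-arms the counter with this after
	 * each PEBS record, giving one record every PEBS_PERIOD
	 * events. Mask to the counter width so the emulation's
	 * reserved-bit check (!(data & ~pmc_bitmask(pmc))) accepts it.
	 */
	wrmsrl(MSR_IA32_RELOAD_PMC0, (0ULL - PEBS_PERIOD) & COUNTER_MASK);

	/* initial count, so the first record also comes after
	 * PEBS_PERIOD events
	 */
	wrmsrl(MSR_IA32_PERFCTR0, (0ULL - PEBS_PERIOD) & COUNTER_MASK);
}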

Signed-off-by: Luwei Kang <luwei.kang@xxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h  |  1 +
 arch/x86/include/asm/msr-index.h |  3 +++
 arch/x86/kvm/vmx/pmu_intel.c     | 22 +++++++++++++++++++++-
 3 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index df966c9..9b930b5 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -454,6 +454,7 @@ struct kvm_pmc {
 	enum pmc_type type;
 	u8 idx;
 	u64 counter;
+	u64 reload_cnt;
 	u64 eventsel;
 	struct perf_event *perf_event;
 	struct kvm_vcpu *vcpu;
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index a9e8720..6321acb 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -141,6 +141,9 @@
 #define MSR_IA32_PERF_CAPABILITIES	0x00000345
 #define MSR_PEBS_LD_LAT_THRESHOLD	0x000003f6
 
+#define MSR_IA32_RELOAD_PMC0		0x000014c1
+#define MSR_IA32_RELOAD_FIXED_CTR0	0x00001309
+
 #define MSR_IA32_RTIT_CTL		0x00000570
 #define RTIT_CTL_TRACEEN		BIT(0)
 #define RTIT_CTL_CYCLEACC		BIT(1)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index fc79cc6..ebd3efc 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -175,7 +175,9 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 	default:
 		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
 			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
-			get_fixed_pmc(pmu, msr, MSR_CORE_PERF_FIXED_CTR0);
+			get_fixed_pmc(pmu, msr, MSR_CORE_PERF_FIXED_CTR0) ||
+			get_gp_pmc(pmu, msr, MSR_IA32_RELOAD_PMC0) ||
+			get_fixed_pmc(pmu, msr, MSR_IA32_RELOAD_FIXED_CTR0);
 		break;
 	}

@@ -216,6 +218,11 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
 			*data = pmc->eventsel;
 			return 0;
+		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_RELOAD_PMC0)) ||
+			   (pmc = get_fixed_pmc(pmu, msr,
+						MSR_IA32_RELOAD_FIXED_CTR0))) {
+			*data = pmc->reload_cnt;
+			return 0;
 		}
 	}

@@ -288,6 +295,19 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 				reprogram_gp_counter(pmc, data);
 				return 0;
 			}
+		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_RELOAD_PMC0)) ||
+			   (pmc = get_fixed_pmc(pmu, msr,
+						MSR_IA32_RELOAD_FIXED_CTR0))) {
+			if (data == pmc->reload_cnt)
+				return 0;
+			if (!(data & ~pmc_bitmask(pmc))) {
+				int pmc_idx = pmc_is_fixed(pmc) ?
+						pmc->idx + INTEL_PMC_IDX_FIXED :
+						pmc->idx;
+				pmc->reload_cnt = data;
+				reprogram_counter(pmu, pmc_idx);
+				return 0;
+			}
 		}
 	}

--
1.8.3.1