From: Dongli Si <sidongli1997@xxxxxxxxx>
perf_ctr_virt_mask is used to mask the Host-Only bit when SVM is disabled.
Using it in a guest doesn't make sense and makes things obscure.
Revert commit df51fe7ea1c1c
("perf/x86/amd: Don't touch the AMD64_EVENTSEL_HOSTONLY bit inside the guest")
because it makes things a little obscure, and the #GP it worked around has since been fixed in KVM.
Fixes: 1018faa6cf23 ("perf/x86/kvm: Fix Host-Only/Guest-Only counting with SVM disabled")
Signed-off-by: Dongli Si <sidongli1997@xxxxxxxxx>
---
arch/x86/events/amd/core.c | 19 ++++++++++++++++++-
arch/x86/events/perf_event.h | 3 +--
2 files changed, 19 insertions(+), 3 deletions(-)
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 9687a8aef01c..5ac7d9410d36 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -533,7 +533,12 @@ static void amd_pmu_cpu_starting(int cpu)
struct amd_nb *nb;
int i, nb_id;
- cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
+ /*
+ * When SVM is disabled, setting the Host-Only bit will cause the
+ * performance counter to not work.
+ */
+ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
if (!x86_pmu.amd_nb_constraints)
return;
@@ -1023,10 +1028,16 @@ __init int amd_pmu_init(void)
return 0;
}
+/*
+ * Unmask the Host-only bit when SVM is enabled on the Host Hypervisor
+ */
void amd_pmu_enable_virt(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return;
+
cpuc->perf_ctr_virt_mask = 0;
/* Reload all events */
@@ -1035,10 +1046,16 @@ void amd_pmu_enable_virt(void)
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
+/*
+ * Mask the Host-only bit when SVM is disabled on the Host Hypervisor
+ */
void amd_pmu_disable_virt(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return;
+
/*
* We only mask out the Host-only bit so that host-only counting works
* when SVM is disabled. If someone sets up a guest-only counter when
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 150261d929b9..fa1428ca60b6 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1138,10 +1138,9 @@ void x86_pmu_stop(struct perf_event *event, int flags);
static inline void x86_pmu_disable_event(struct perf_event *event)
{
- u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask);
struct hw_perf_event *hwc = &event->hw;
- wrmsrl(hwc->config_base, hwc->config & ~disable_mask);
+ wrmsrl(hwc->config_base, hwc->config);
if (is_counter_pair(hwc))
wrmsrl(x86_pmu_config_addr(hwc->idx + 1), 0);