[PATCH 3/4] KVM: x86/pmu: Reuse find_perf_hw_id() and drop find_fixed_event()

From: Like Xu
Date: Tue Nov 16 2021 - 07:21:58 EST


From: Like Xu <likexu@xxxxxxxxxxx>

Since pmc->eventsel is now set up with the same semantic event value
for the fixed counters as well, returning the perf_hw_id for a fixed
counter via find_fixed_event() can be painlessly replaced by
find_perf_hw_id() with the help of a pmc_is_fixed() check.

Signed-off-by: Like Xu <likexu@xxxxxxxxxxx>
---
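
For reviewers, a rough sketch of the unified lookup path after this
patch (illustration only, not part of the diff; the fixed counter
pmc->eventsel setup comes from the earlier patch in this series):

  reprogram_fixed_counter(pmc, ctrl, idx)
    -> kvm_x86_ops.pmu_ops->find_perf_hw_id(pmc)
         Intel: if (pmc_is_fixed(pmc))
                    return intel_find_fixed_event(pmc->idx - INTEL_PMC_IDX_FIXED);
         AMD:   if (pmc_is_fixed(pmc))
                    return PERF_COUNT_HW_MAX; /* AMD has no fixed counters */
         otherwise, fall through to the eventsel/unit_mask table lookup.
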
arch/x86/kvm/pmu.c | 2 +-
arch/x86/kvm/pmu.h | 1 -
arch/x86/kvm/svm/pmu.c | 11 ++++-------
arch/x86/kvm/vmx/pmu_intel.c | 29 ++++++++++++++++-------------
4 files changed, 21 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 903dc6a532cc..3c45467b4275 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -262,7 +262,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)

pmc->current_config = (u64)ctrl;
pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
- kvm_x86_ops.pmu_ops->find_fixed_event(idx),
+ kvm_x86_ops.pmu_ops->find_perf_hw_id(pmc),
!(en_field & 0x2), /* exclude user */
!(en_field & 0x1), /* exclude kernel */
pmi, false, false);
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index e7a5d4b6fa94..354339710d0d 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -25,7 +25,6 @@ struct kvm_event_hw_type_mapping {

struct kvm_pmu_ops {
unsigned int (*find_perf_hw_id)(struct kvm_pmc *pmc);
- unsigned (*find_fixed_event)(int idx);
bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 1d31bd5c6803..eeaeb58d501b 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -140,6 +140,10 @@ static unsigned int amd_find_perf_hw_id(struct kvm_pmc *pmc)
u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
int i;

+ /* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
+ if (pmc_is_fixed(pmc))
+ return PERF_COUNT_HW_MAX;
+
for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
if (amd_event_mapping[i].eventsel == event_select
&& amd_event_mapping[i].unit_mask == unit_mask)
@@ -151,12 +155,6 @@ static unsigned int amd_find_perf_hw_id(struct kvm_pmc *pmc)
return amd_event_mapping[i].event_type;
}

-/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
-static unsigned amd_find_fixed_event(int idx)
-{
- return PERF_COUNT_HW_MAX;
-}
-
/* check if a PMC is enabled by comparing it against global_ctrl bits. Because
* AMD CPU doesn't have global_ctrl MSR, all PMCs are enabled (return TRUE).
*/
@@ -321,7 +319,6 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)

struct kvm_pmu_ops amd_pmu_ops = {
.find_perf_hw_id = amd_find_perf_hw_id,
- .find_fixed_event = amd_find_fixed_event,
.pmc_is_enabled = amd_pmc_is_enabled,
.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index f1cc6192ead7..8ba8b4ab1fb7 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -68,6 +68,19 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
reprogram_counter(pmu, bit);
}

+static inline unsigned int intel_find_fixed_event(int idx)
+{
+ u32 event;
+ size_t size = ARRAY_SIZE(fixed_pmc_events);
+
+ if (idx >= size)
+ return PERF_COUNT_HW_MAX;
+
+ event = fixed_pmc_events[array_index_nospec(idx, size)];
+ return intel_arch_events[event].event_type;
+}
+
+
static unsigned int intel_find_perf_hw_id(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -75,6 +88,9 @@ static unsigned int intel_find_perf_hw_id(struct kvm_pmc *pmc)
u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
int i;

+ if (pmc_is_fixed(pmc))
+ return intel_find_fixed_event(pmc->idx - INTEL_PMC_IDX_FIXED);
+
for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
if (intel_arch_events[i].eventsel == event_select
&& intel_arch_events[i].unit_mask == unit_mask
@@ -87,18 +103,6 @@ static unsigned int intel_find_perf_hw_id(struct kvm_pmc *pmc)
return intel_arch_events[i].event_type;
}

-static unsigned intel_find_fixed_event(int idx)
-{
- u32 event;
- size_t size = ARRAY_SIZE(fixed_pmc_events);
-
- if (idx >= size)
- return PERF_COUNT_HW_MAX;
-
- event = fixed_pmc_events[array_index_nospec(idx, size)];
- return intel_arch_events[event].event_type;
-}
-
/* check if a PMC is enabled by comparing it with globl_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
@@ -722,7 +726,6 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)

struct kvm_pmu_ops intel_pmu_ops = {
.find_perf_hw_id = intel_find_perf_hw_id,
- .find_fixed_event = intel_find_fixed_event,
.pmc_is_enabled = intel_pmc_is_enabled,
.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
--
2.33.1