[PATCH v2 3/5] KVM: x86/pmu: Add KVM_PMU_CALL() to simplify static calls of kvm_pmu_ops

From: Wei Wang
Date: Fri Apr 19 2024 - 07:30:48 EST


Similar to KVM_X86_CALL(), add KVM_PMU_CALL() to streamline the usage of
static calls of kvm_pmu_ops. This improves code readability and
maintainability while adhering to the kernel coding style guidelines.
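
For example, with the macro in place, a call site such as:

	static_call(kvm_x86_pmu_refresh)(vcpu);

becomes:

	KVM_PMU_CALL(refresh, vcpu);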

Suggested-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Signed-off-by: Wei Wang <wei.w.wang@xxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/pmu.c              | 24 ++++++++++++------------
 2 files changed, 13 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7249a3a2bbb1..3a2e42bb6969 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1853,6 +1853,7 @@ extern bool __read_mostly enable_apicv;
 extern struct kvm_x86_ops kvm_x86_ops;
 
 #define KVM_X86_CALL(func, ...) static_call(kvm_x86_##func)(__VA_ARGS__)
+#define KVM_PMU_CALL(func, ...) static_call(kvm_x86_pmu_##func)(__VA_ARGS__)
 
 #define KVM_X86_OP(func) \
 	DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 1d456b69ddc6..c6f8e9ab2866 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -542,7 +542,7 @@ int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
 	if (!kvm_pmu_ops.check_rdpmc_early)
 		return 0;
 
-	return static_call(kvm_x86_pmu_check_rdpmc_early)(vcpu, idx);
+	return KVM_PMU_CALL(check_rdpmc_early, vcpu, idx);
 }
 
 bool is_vmware_backdoor_pmc(u32 pmc_idx)
@@ -591,7 +591,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 	if (is_vmware_backdoor_pmc(idx))
 		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);
 
-	pmc = static_call(kvm_x86_pmu_rdpmc_ecx_to_pmc)(vcpu, idx, &mask);
+	pmc = KVM_PMU_CALL(rdpmc_ecx_to_pmc, vcpu, idx, &mask);
 	if (!pmc)
 		return 1;

@@ -607,7 +607,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 {
 	if (lapic_in_kernel(vcpu)) {
-		static_call(kvm_x86_pmu_deliver_pmi)(vcpu);
+		KVM_PMU_CALL(deliver_pmi, vcpu);
 		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
 	}
 }
@@ -622,14 +622,14 @@ bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 	default:
 		break;
 	}
-	return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
-	       static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
+	return KVM_PMU_CALL(msr_idx_to_pmc, vcpu, msr) ||
+	       KVM_PMU_CALL(is_valid_msr, vcpu, msr);
 }
 
 static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-	struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);
+	struct kvm_pmc *pmc = KVM_PMU_CALL(msr_idx_to_pmc, vcpu, msr);
 
 	if (pmc)
 		__set_bit(pmc->idx, pmu->pmc_in_use);
@@ -654,7 +654,7 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = 0;
 		break;
 	default:
-		return static_call(kvm_x86_pmu_get_msr)(vcpu, msr_info);
+		return KVM_PMU_CALL(get_msr, vcpu, msr_info);
 	}
 
 	return 0;
@@ -713,7 +713,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	default:
 		kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
-		return static_call(kvm_x86_pmu_set_msr)(vcpu, msr_info);
+		return KVM_PMU_CALL(set_msr, vcpu, msr_info);
 	}
 
 	return 0;
@@ -740,7 +740,7 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)

 	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
 
-	static_call(kvm_x86_pmu_reset)(vcpu);
+	KVM_PMU_CALL(reset, vcpu);
 }


@@ -778,7 +778,7 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 	if (!vcpu->kvm->arch.enable_pmu)
 		return;
 
-	static_call(kvm_x86_pmu_refresh)(vcpu);
+	KVM_PMU_CALL(refresh, vcpu);
 
 	/*
 	 * At RESET, both Intel and AMD CPUs set all enable bits for general
@@ -796,7 +796,7 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
 	memset(pmu, 0, sizeof(*pmu));
-	static_call(kvm_x86_pmu_init)(vcpu);
+	KVM_PMU_CALL(init, vcpu);
 	kvm_pmu_refresh(vcpu);
 }

@@ -818,7 +818,7 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
 			pmc_stop_counter(pmc);
 	}
 
-	static_call(kvm_x86_pmu_cleanup)(vcpu);
+	KVM_PMU_CALL(cleanup, vcpu);
 
 	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
 }
--
2.27.0