[PATCH 09/15] KVM: arm64: Register/unregister perf callbacks at vcpu load/put
From: Sean Christopherson
Date: Thu Aug 26 2021 - 20:58:19 EST
Register/unregister perf callbacks at vcpu_load()/vcpu_put() instead of
keeping the callbacks registered for all eternity after loading KVM.
This will allow future cleanups and optimizations as the registration
of the callbacks signifies "in guest". This will also allow moving the
callbacks into common KVM as arm64 and x86 now have semantically
identical callback implementations.
Note, KVM could likely be more precise in its registration, but that's a
cleanup for the future.
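For illustration only (not part of this patch), the callbacks could
plausibly be hoisted into common KVM along these lines, built on
kvm_get_running_vcpu(). In this sketch, kvm_arch_vcpu_get_ip() is a
hypothetical arch hook named purely for illustration, and
kvm_arch_vcpu_in_kernel() is assumed to report the guest privilege
level:

#include <linux/kvm_host.h>
#include <linux/perf_event.h>

static int kvm_guest_state_is_in_guest(void)
{
	/* The callbacks are only registered while a vCPU is loaded. */
	return true;
}

static int kvm_guest_state_is_user_mode(void)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

	if (WARN_ON_ONCE(!vcpu))
		return 0;

	return !kvm_arch_vcpu_in_kernel(vcpu);
}

static unsigned long kvm_guest_state_get_ip(void)
{
	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

	if (WARN_ON_ONCE(!vcpu))
		return 0;

	return kvm_arch_vcpu_get_ip(vcpu);	/* hypothetical arch hook */
}

static struct perf_guest_info_callbacks kvm_guest_cbs = {
	.is_in_guest	= kvm_guest_state_is_in_guest,
	.is_user_mode	= kvm_guest_state_is_user_mode,
	.get_guest_ip	= kvm_guest_state_get_ip,
};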
Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
arch/arm64/include/asm/kvm_host.h | 12 ++++++++++-
arch/arm64/kvm/arm.c | 5 ++++-
arch/arm64/kvm/perf.c | 36 ++++++++++++++-----------------
3 files changed, 31 insertions(+), 22 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index ed940aec89e0..007c38d77fd9 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -671,7 +671,17 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
void kvm_perf_init(void);
-void kvm_perf_teardown(void);
+
+#ifdef CONFIG_PERF_EVENTS
+void kvm_register_perf_callbacks(void);
+static inline void kvm_unregister_perf_callbacks(void)
+{
+ __perf_unregister_guest_info_callbacks();
+}
+#else
+static inline void kvm_register_perf_callbacks(void) {}
+static inline void kvm_unregister_perf_callbacks(void) {}
+#endif
long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e9a2b8f27792..ec386971030d 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -429,10 +429,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (vcpu_has_ptrauth(vcpu))
vcpu_ptrauth_disable(vcpu);
kvm_arch_vcpu_load_debug_state_flags(vcpu);
+
+ kvm_register_perf_callbacks();
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
+ kvm_unregister_perf_callbacks();
kvm_arch_vcpu_put_debug_state_flags(vcpu);
kvm_arch_vcpu_put_fp(vcpu);
if (has_vhe())
@@ -2155,7 +2158,7 @@ int kvm_arch_init(void *opaque)
/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
- kvm_perf_teardown();
+
}
static int __init early_kvm_mode_cfg(char *arg)
diff --git a/arch/arm64/kvm/perf.c b/arch/arm64/kvm/perf.c
index 039fe59399a2..2556b0a3b096 100644
--- a/arch/arm64/kvm/perf.c
+++ b/arch/arm64/kvm/perf.c
@@ -13,33 +13,30 @@
DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
+#ifdef CONFIG_PERF_EVENTS
static int kvm_is_in_guest(void)
{
- return kvm_get_running_vcpu() != NULL;
+ return true;
}
static int kvm_is_user_mode(void)
{
- struct kvm_vcpu *vcpu;
+ struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
- vcpu = kvm_get_running_vcpu();
+ if (WARN_ON_ONCE(!vcpu))
+ return 0;
- if (vcpu)
- return !vcpu_mode_priv(vcpu);
-
- return 0;
+ return !vcpu_mode_priv(vcpu);
}
static unsigned long kvm_get_guest_ip(void)
{
- struct kvm_vcpu *vcpu;
+ struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
- vcpu = kvm_get_running_vcpu();
+ if (WARN_ON_ONCE(!vcpu))
+ return 0;
- if (vcpu)
- return *vcpu_pc(vcpu);
-
- return 0;
+ return *vcpu_pc(vcpu);
}
static struct perf_guest_info_callbacks kvm_guest_cbs = {
@@ -48,15 +45,14 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
.get_guest_ip = kvm_get_guest_ip,
};
+void kvm_register_perf_callbacks(void)
+{
+ __perf_register_guest_info_callbacks(&kvm_guest_cbs);
+}
+#endif /* CONFIG_PERF_EVENTS */
+
void kvm_perf_init(void)
{
if (kvm_pmu_probe_pmuver() != 0xf && !is_protected_kvm_enabled())
static_branch_enable(&kvm_arm_pmu_available);
-
- perf_register_guest_info_callbacks(&kvm_guest_cbs);
-}
-
-void kvm_perf_teardown(void)
-{
- perf_unregister_guest_info_callbacks();
}
--
2.33.0.259.gc128427fd7-goog