[PATCH v13 38/48] arm64: RMI: Enable PMU support with a realm guest
From: Steven Price
Date: Wed Mar 18 2026 - 13:28:13 EST
Use the PMU registers from the RmiRecExit structure to identify when an
overflow interrupt is due and inject it into the guest. Also hook up the
configuration option for enabling the PMU within the guest.
The number of PMU counters is configured by the VMM by writing to PMCR.N.
Signed-off-by: Steven Price <steven.price@xxxxxxx>
---
Changes since v12:
* RMMv2.0 no longer requires disabling the physical interrupt when
entering the guest with a PMU overflow interrupt active.
Changes since v2:
* Add a macro kvm_pmu_get_irq_level() to avoid compile issues when PMU
support is disabled.
---
arch/arm64/include/asm/kvm_rmi.h | 1 +
arch/arm64/kvm/arm.c | 2 ++
arch/arm64/kvm/guest.c | 7 +++++++
arch/arm64/kvm/pmu-emul.c | 3 +++
arch/arm64/kvm/rmi.c | 8 ++++++++
arch/arm64/kvm/sys_regs.c | 5 +++--
include/kvm/arm_pmu.h | 4 ++++
7 files changed, 28 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_rmi.h b/arch/arm64/include/asm/kvm_rmi.h
index d6cf87de099b..17bb7e2a2aa0 100644
--- a/arch/arm64/include/asm/kvm_rmi.h
+++ b/arch/arm64/include/asm/kvm_rmi.h
@@ -88,6 +88,7 @@ void kvm_init_rmi(void);
u32 kvm_realm_ipa_limit(void);
bool kvm_rmi_supports_sve(void);
+bool kvm_rmi_supports_pmu(void);
int kvm_init_realm_vm(struct kvm *kvm);
int kvm_activate_realm(struct kvm *kvm);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 3e6f1e810921..cd2cb5e54f21 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -388,6 +388,8 @@ static bool kvm_realm_ext_allowed(long ext)
case KVM_CAP_ARM_PTRAUTH_GENERIC:
case KVM_CAP_ARM_RMI:
return true;
+ case KVM_CAP_ARM_PMU_V3:
+ return kvm_rmi_supports_pmu();
case KVM_CAP_ARM_SVE:
return kvm_rmi_supports_sve();
}
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index d9f392cb2759..14302130d341 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -735,6 +735,8 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
return kvm_arm_sys_reg_get_reg(vcpu, reg);
}
+#define KVM_REG_ARM_PMCR_EL0 ARM64_SYS_REG(3, 3, 9, 12, 0)
+
/*
* The RMI ABI only enables setting some GPRs and PC. The selection of GPRs
* that are available depends on the Realm state and the reason for the last
@@ -749,6 +751,11 @@ static bool validate_realm_set_reg(struct kvm_vcpu *vcpu,
u64 off = core_reg_offset_from_id(reg->id);
return kvm_realm_validate_core_reg(off);
+ } else {
+ switch (reg->id) {
+ case KVM_REG_ARM_PMCR_EL0:
+ return true;
+ }
}
return false;
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 93cc9bbb5cec..450b0eac20f8 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -374,6 +374,9 @@ static bool kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
u64 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
+ if (vcpu_is_rec(vcpu))
+ return vcpu->arch.rec.run->exit.pmu_ovf_status;
+
reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
/*
diff --git a/arch/arm64/kvm/rmi.c b/arch/arm64/kvm/rmi.c
index 7cc6dc14d325..8dc090da6e5f 100644
--- a/arch/arm64/kvm/rmi.c
+++ b/arch/arm64/kvm/rmi.c
@@ -38,6 +38,11 @@ bool kvm_rmi_supports_sve(void)
return rmi_has_feature(RMI_FEATURE_REGISTER_0_SVE);
}
+bool kvm_rmi_supports_pmu(void)
+{
+ return rmi_has_feature(RMI_FEATURE_REGISTER_0_PMU);
+}
+
static int rmi_check_version(void)
{
struct arm_smccc_res res;
@@ -1431,6 +1436,9 @@ static int kvm_create_rec(struct kvm_vcpu *vcpu)
if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2))
return -EINVAL;
+ if (vcpu->kvm->arch.arm_pmu && !kvm_vcpu_has_pmu(vcpu))
+ return -EINVAL;
+
BUILD_BUG_ON(sizeof(*params) > PAGE_SIZE);
BUILD_BUG_ON(sizeof(*rec->run) > PAGE_SIZE);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index a7cd0badc20c..46f5e2ab3e2c 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1360,8 +1360,9 @@ static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
* implements. Ignore this error to maintain compatibility
* with the existing KVM behavior.
*/
- if (!kvm_vm_has_ran_once(kvm) &&
- !vcpu_has_nv(vcpu) &&
+ if (!kvm_vm_has_ran_once(kvm) &&
+ !kvm_realm_is_created(kvm) &&
+ !vcpu_has_nv(vcpu) &&
new_n <= kvm_arm_pmu_get_max_counters(kvm))
kvm->arch.nr_pmu_counters = new_n;
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 96754b51b411..da32f1bd9f8c 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -70,6 +70,8 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_resync_el0(void);
+#define kvm_pmu_get_irq_level(vcpu) ((vcpu)->arch.pmu.irq_level)
+
#define kvm_vcpu_has_pmu(vcpu) \
(vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))
@@ -157,6 +159,8 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
return 0;
}
+#define kvm_pmu_get_irq_level(vcpu) (false)
+
#define kvm_vcpu_has_pmu(vcpu) ({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
--
2.43.0