[PATCH RFC v3 10/27] KVM: arm64: Rename SVE finalization constants to be more general

From: Mark Brown
Date: Fri Dec 20 2024 - 11:58:13 EST


Due to the overlap between SVE and SME vector length configuration
created by streaming mode SVE, we will finalize both at once. Rename the
existing finalization constants to use _VEC (vector) to avoid confusion.

Since this includes the userspace API, create an alias KVM_ARM_VCPU_VEC
for the existing KVM_ARM_VCPU_SVE feature; existing code which does not
enable SME is unaffected, and SME-only code will not need to use SVE
constants.

No functional change.

Signed-off-by: Mark Brown <broonie@xxxxxxxxxx>
---
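As a usage note for the alias (not part of the change itself): userspace
finalizes the vector configuration through KVM_ARM_VCPU_VEC exactly as it
does for SVE today. A minimal sketch, assuming vcpu_fd is a vCPU fd that
was initialised with the KVM_ARM_VCPU_SVE feature bit set (the helper
name and error handling are illustrative only):

  #include <err.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Finalize SVE/SME vector configuration for an initialised vCPU. */
  static void finalize_vec(int vcpu_fd)
  {
  	int feature = KVM_ARM_VCPU_VEC;	/* same value as KVM_ARM_VCPU_SVE */

  	/* Must run after KVM_ARM_VCPU_INIT and any vector length setup. */
  	if (ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature) < 0)
  		err(1, "KVM_ARM_VCPU_FINALIZE");
  }

Because the alias carries the same value, existing userspace built
against KVM_ARM_VCPU_SVE continues to work unchanged.
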
 arch/arm64/include/asm/kvm_host.h |  6 ++++--
 arch/arm64/include/uapi/asm/kvm.h |  6 ++++++
 arch/arm64/kvm/guest.c            | 10 +++++-----
 arch/arm64/kvm/hyp/nvhe/pkvm.c    |  2 +-
 arch/arm64/kvm/reset.c            | 20 ++++++++++----------
 5 files changed, 26 insertions(+), 18 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 97b617606221e8c11fd2b55d9636848d8453209f..f64ad573573cf000c4644f12f9e072a2fdfc3824 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -873,7 +873,7 @@ struct kvm_vcpu_arch {
 /* KVM_ARM_VCPU_INIT completed */
 #define VCPU_INITIALIZED	__vcpu_single_flag(cflags, BIT(0))
 /* SVE config completed */
-#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
+#define VCPU_VEC_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
 
 /* Exception pending */
 #define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
@@ -948,6 +948,8 @@ struct kvm_vcpu_arch {
 #define vcpu_has_sve(vcpu) kvm_has_sve((vcpu)->kvm)
 #endif
 
+#define vcpu_has_vec(vcpu) vcpu_has_sve(vcpu)
+
 #ifdef CONFIG_ARM64_PTR_AUTH
 #define vcpu_has_ptrauth(vcpu)						\
 	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
@@ -1414,7 +1416,7 @@ struct kvm *kvm_arch_alloc_vm(void);
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 
-#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)
+#define kvm_arm_vcpu_vec_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_VEC_FINALIZED)
 
 #define kvm_has_mte(kvm)					\
 	(system_supports_mte() &&				\
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 66736ff04011e0fa9fcfb74154d5613bf4ee89f7..9d80d22af9d4e00204f5096fb7c8c2ee8c3646c1 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -109,6 +109,12 @@ struct kvm_regs {
 #define KVM_ARM_VCPU_PTRAUTH_GENERIC	6 /* VCPU uses generic authentication */
 #define KVM_ARM_VCPU_HAS_EL2		7 /* Support nested virtualization */
 
+/*
+ * An alias for _SVE since we finalize VL configuration for both SVE and SME
+ * simultaneously.
+ */
+#define KVM_ARM_VCPU_VEC		KVM_ARM_VCPU_SVE
+
 struct kvm_vcpu_init {
 	__u32 target;
 	__u32 features[7];
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 12dad841f2a51276eee4d4da7400c1b2a5732ff8..62ff51d6e4584acc71205f5d4b1d2f3d2e2d2f88 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -342,7 +342,7 @@ static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	if (!vcpu_has_sve(vcpu))
 		return -ENOENT;
 
-	if (kvm_arm_vcpu_sve_finalized(vcpu))
+	if (kvm_arm_vcpu_vec_finalized(vcpu))
 		return -EPERM; /* too late! */
 
 	if (WARN_ON(vcpu->arch.sve_state))
@@ -497,7 +497,7 @@ static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	if (ret)
 		return ret;
 
-	if (!kvm_arm_vcpu_sve_finalized(vcpu))
+	if (!kvm_arm_vcpu_vec_finalized(vcpu))
 		return -EPERM;
 
 	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
@@ -523,7 +523,7 @@ static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 	if (ret)
 		return ret;
 
-	if (!kvm_arm_vcpu_sve_finalized(vcpu))
+	if (!kvm_arm_vcpu_vec_finalized(vcpu))
 		return -EPERM;
 
 	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
@@ -657,7 +657,7 @@ static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
 		return 0;
 
 	/* Policed by KVM_GET_REG_LIST: */
-	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
+	WARN_ON(!kvm_arm_vcpu_vec_finalized(vcpu));
 
 	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
 		+ 1; /* KVM_REG_ARM64_SVE_VLS */
@@ -675,7 +675,7 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
 		return 0;
 
 	/* Policed by KVM_GET_REG_LIST: */
-	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));
+	WARN_ON(!kvm_arm_vcpu_vec_finalized(vcpu));
 
 	/*
 	 * Enumerate this first, so that userspace can save/restore in
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 446a9114b0d3ee4323a9cd8d618d36035e85e4d0..0a4e1f5105592b23a0505bf7680c66e76b5c2a65 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -314,7 +314,7 @@ static void pkvm_vcpu_init_sve(struct pkvm_hyp_vcpu *hyp_vcpu, struct kvm_vcpu *
 	struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
 
 	if (!vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
-		vcpu_clear_flag(vcpu, VCPU_SVE_FINALIZED);
+		vcpu_clear_flag(vcpu, VCPU_VEC_FINALIZED);
 }
 
 static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 803e11b0dc8f5eb74b07b0ad745b0c4f666713d5..ce726b1d4e8e90cfd4459a6cb9c67b8805424e22 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -92,7 +92,7 @@ static void kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
  * Finalize vcpu's maximum SVE vector length, allocating
  * vcpu->arch.sve_state as necessary.
  */
-static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
+static int kvm_vcpu_finalize_vec(struct kvm_vcpu *vcpu)
 {
 	void *buf;
 	unsigned int vl;
@@ -122,21 +122,21 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
 	}
 
 	vcpu->arch.sve_state = buf;
-	vcpu_set_flag(vcpu, VCPU_SVE_FINALIZED);
+	vcpu_set_flag(vcpu, VCPU_VEC_FINALIZED);
 	return 0;
 }
 
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
 {
 	switch (feature) {
-	case KVM_ARM_VCPU_SVE:
-		if (!vcpu_has_sve(vcpu))
+	case KVM_ARM_VCPU_VEC:
+		if (!vcpu_has_vec(vcpu))
 			return -EINVAL;
 
-		if (kvm_arm_vcpu_sve_finalized(vcpu))
+		if (kvm_arm_vcpu_vec_finalized(vcpu))
 			return -EPERM;
 
-		return kvm_vcpu_finalize_sve(vcpu);
+		return kvm_vcpu_finalize_vec(vcpu);
 	}
 
 	return -EINVAL;
@@ -144,7 +144,7 @@ int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
 
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
 {
-	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
+	if (vcpu_has_vec(vcpu) && !kvm_arm_vcpu_vec_finalized(vcpu))
 		return false;
 
 	return true;
@@ -161,7 +161,7 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kfree(vcpu->arch.ccsidr);
 }
 
-static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
+static void kvm_vcpu_reset_vec(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_has_sve(vcpu))
 		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
@@ -204,11 +204,11 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	if (loaded)
 		kvm_arch_vcpu_put(vcpu);
 
-	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
+	if (!kvm_arm_vcpu_vec_finalized(vcpu)) {
 		if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
 			kvm_vcpu_enable_sve(vcpu);
 	} else {
-		kvm_vcpu_reset_sve(vcpu);
+		kvm_vcpu_reset_vec(vcpu);
 	}
 
 	if (vcpu_el1_is_32bit(vcpu))

--
2.39.5