[PATCH v1 11/27] KVM: arm64: Access elements of vcpu_gp_regs individually

From: Steffen Eiden

Date: Thu Apr 02 2026 - 00:45:42 EST


While for arm64 the members of vcpu_gp_regs are allocated contiguously,
this is not necessarily true for other architectures implementing ARM.

Let vcpu_gp_regs() no longer return the address of the user_pt_regs in
the vcpu context, but instead the address of the gp-register array field
in the user_pt_regs struct.

Co-developed-by: Nina Schoetterl-Glausch <nsg@xxxxxxxxxxxxx>
Signed-off-by: Nina Schoetterl-Glausch <nsg@xxxxxxxxxxxxx>
Signed-off-by: Steffen Eiden <seiden@xxxxxxxxxxxxx>
---
arch/arm64/include/asm/kvm_emulate.h | 9 +++++++--
arch/arm64/include/asm/kvm_host.h | 2 +-
arch/arm64/kvm/hyp/exception.c | 7 +++++--
arch/arm64/kvm/hyp/include/hyp/adjust_pc.h | 4 ++--
arch/arm64/kvm/hyp/include/hyp/switch.h | 6 +++---
arch/arm64/kvm/reset.c | 3 ++-
include/kvm/arm64/kvm_emulate.h | 4 ++--
virt/kvm/arm64/guest.c | 6 +++---
8 files changed, 25 insertions(+), 16 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 39fa3a12730c..41eac2b5de14 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -95,12 +95,17 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
- return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
+ return (unsigned long *)&vcpu->arch.ctxt.regs.pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
- return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
+ return (unsigned long *)&vcpu->arch.ctxt.regs.pstate;
+}
+
+static __always_inline unsigned long *vcpu_sp_el0(const struct kvm_vcpu *vcpu)
+{
+ return (unsigned long *)&vcpu->arch.ctxt.regs.sp;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index ae9e507f2c7c..7e473b895740 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1050,7 +1050,7 @@ struct kvm_vcpu_arch {
#define vcpu_clear_on_unsupported_cpu(vcpu) \
vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)

-#define vcpu_gp_regs(v) (&(v)->arch.ctxt.regs)
+#define vcpu_gp_regs(v) ((v)->arch.ctxt.regs.regs)

/*
* Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
index bef40ddb16db..82611442a2d1 100644
--- a/arch/arm64/kvm/hyp/exception.c
+++ b/arch/arm64/kvm/hyp/exception.c
@@ -277,6 +277,9 @@ static const u8 return_offsets[8][2] = {
[7] = { 4, 4 }, /* FIQ, unused */
};

+#define OFFSETOF_PT_REG(__r) offsetof(struct user_pt_regs, __r)
+#define COMPAT_IDX(__c) ((OFFSETOF_PT_REG(__c) - OFFSETOF_PT_REG(regs[0])) / sizeof(u64))
+
static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
unsigned long spsr = *vcpu_cpsr(vcpu);
@@ -292,12 +295,12 @@ static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
switch(mode) {
case PSR_AA32_MODE_ABT:
__vcpu_write_spsr_abt(vcpu, host_spsr_to_spsr32(spsr));
- vcpu_gp_regs(vcpu)->compat_lr_abt = return_address;
+ vcpu_gp_regs(vcpu)[COMPAT_IDX(compat_lr_abt)] = return_address;
break;

case PSR_AA32_MODE_UND:
__vcpu_write_spsr_und(vcpu, host_spsr_to_spsr32(spsr));
- vcpu_gp_regs(vcpu)->compat_lr_und = return_address;
+ vcpu_gp_regs(vcpu)[COMPAT_IDX(compat_lr_und)] = return_address;
break;
}

diff --git a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
index 15e1e5db73e1..4e4cb67824c0 100644
--- a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
+++ b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
@@ -20,11 +20,11 @@
static inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
- vcpu_gp_regs(vcpu)->pstate = read_sysreg_el2(SYS_SPSR);
+ *vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);

kvm_skip_instr(vcpu);

- write_sysreg_el2(vcpu_gp_regs(vcpu)->pstate, SYS_SPSR);
+ write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
}

diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 2597e8bda867..79e6e6cc9f81 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -416,7 +416,7 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
static inline bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code)
{
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
- arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2);
+ arm64_mops_reset_regs(&vcpu->arch.ctxt.regs, vcpu->arch.fault.esr_el2);
write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);

/*
@@ -857,7 +857,7 @@ static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu)
/*
* Check for the conditions of Cortex-A510's #2077057. When these occur
* SPSR_EL2 can't be trusted, but isn't needed either as it is
- * unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
+ * unchanged from the value in *vcpu_cpsr(vcpu).
* Are we single-stepping the guest, and took a PAC exception from the
* active-not-pending state?
*/
@@ -867,7 +867,7 @@ static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu)
ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

- vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+ *vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
}

/*
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 036bf2dff976..d039f1d7116a 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -219,12 +219,13 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu)

/* Reset core registers */
- memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
+ /*
+ * vcpu_gp_regs() now yields the x0..x30 array, so sizeof(*...) would
+ * only cover x0; zero the whole array plus sp and pc explicitly.
+ */
+ memset(vcpu_gp_regs(vcpu), 0, sizeof(vcpu->arch.ctxt.regs.regs));
+ *vcpu_sp_el0(vcpu) = 0;
+ *vcpu_pc(vcpu) = 0;
memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
vcpu->arch.ctxt.spsr_abt = 0;
vcpu->arch.ctxt.spsr_und = 0;
vcpu->arch.ctxt.spsr_irq = 0;
vcpu->arch.ctxt.spsr_fiq = 0;
- vcpu_gp_regs(vcpu)->pstate = pstate;
+ *vcpu_cpsr(vcpu) = pstate;

/* Reset system registers */
kvm_reset_sys_regs(vcpu);
diff --git a/include/kvm/arm64/kvm_emulate.h b/include/kvm/arm64/kvm_emulate.h
index 25322b95af21..0e16d18e53d2 100644
--- a/include/kvm/arm64/kvm_emulate.h
+++ b/include/kvm/arm64/kvm_emulate.h
@@ -77,14 +77,14 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
u8 reg_num)
{
- return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
+ return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
unsigned long val)
{
if (reg_num != 31)
- vcpu_gp_regs(vcpu)->regs[reg_num] = val;
+ vcpu_gp_regs(vcpu)[reg_num] = val;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm64/guest.c b/virt/kvm/arm64/guest.c
index 83e33e0143b9..e283a4456df8 100644
--- a/virt/kvm/arm64/guest.c
+++ b/virt/kvm/arm64/guest.c
@@ -81,16 +81,16 @@ static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
KVM_REG_ARM_CORE_REG(regs.regs[30]):
off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
off /= 2;
- return &vcpu_gp_regs(vcpu)->regs[off];
+ return &vcpu_gp_regs(vcpu)[off];

case KVM_REG_ARM_CORE_REG(regs.sp):
- return &vcpu_gp_regs(vcpu)->sp;
+ return vcpu_sp_el0(vcpu);

case KVM_REG_ARM_CORE_REG(regs.pc):
return vcpu_pc(vcpu);

case KVM_REG_ARM_CORE_REG(regs.pstate):
- return &vcpu_gp_regs(vcpu)->pstate;
+ return vcpu_cpsr(vcpu);

case KVM_REG_ARM_CORE_REG(sp_el1):
return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);
--
2.51.0