Re: [PATCH v2 01/16] KVM: x86: Rename register accessors to be GPR-specific

From: Chang S. Bae

Date: Mon Mar 09 2026 - 19:29:00 EST


On 3/6/2026 5:32 PM, Chang S. Bae wrote:

The 'reg' argument has historically matched the index of the register cache array, vcpu::arch::regs[]. When extending the accessors to support EGPRs, it seemed natural to keep using it as a register ID, since that wires up cleanly with VMX instruction info and emulator sites. But then reg=16 immediately conflicts with RIP.

I think it is possible to introduce a dedicated field there, instead of regs[]. RIP appears to be switched by hardware on VM exit/entry anyway.

The attached draft takes that approach:

* First, move RIP into the new field. Then kvm_register_read|write()
family effectively becomes GPR-only (while keeping the generic
'register' name).

* Second, the extra layer adds EGPR support. But it doesn't appear to
have measurable overhead, and it can be compiled out.

---
arch/x86/include/asm/kvm_host.h | 5 +++--
arch/x86/kvm/kvm_cache_regs.h | 11 ++++++-----
arch/x86/kvm/svm/sev.c | 2 +-
arch/x86/kvm/svm/svm.c | 6 +++---
arch/x86/kvm/vmx/vmx.c | 4 ++--
5 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ff07c45e3c73..0b95126505ac 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -190,10 +190,10 @@ enum kvm_reg {
VCPU_REGS_R14 = __VCPU_REGS_R14,
VCPU_REGS_R15 = __VCPU_REGS_R15,
#endif
- VCPU_REGS_RIP,
NR_VCPU_REGS,
+ VCPU_REGS_RIP = NR_VCPU_REGS,

- VCPU_EXREG_PDPTR = NR_VCPU_REGS,
+ VCPU_EXREG_PDPTR,
VCPU_EXREG_CR0,
/*
* Alias AMD's ERAPS (not a real register) to CR3 so that common code
@@ -799,6 +799,7 @@ struct kvm_vcpu_arch {
* kvm_{register,rip}_{read,write} functions.
*/
unsigned long regs[NR_VCPU_REGS];
+ unsigned long rip;
u32 regs_avail;
u32 regs_dirty;

diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 8ddb01191d6f..33514affb90d 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -115,9 +115,6 @@ static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg
if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
return 0;

- if (!kvm_register_is_available(vcpu, reg))
- kvm_x86_call(cache_reg)(vcpu, reg);
-
return vcpu->arch.regs[reg];
}

@@ -133,12 +130,16 @@ static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
- return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
+ if (!kvm_register_is_available(vcpu, VCPU_REGS_RIP))
+ kvm_x86_call(cache_reg)(vcpu, VCPU_REGS_RIP);
+
+ return vcpu->arch.rip;
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
- kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
+ vcpu->arch.rip = val;
+ kvm_register_mark_dirty(vcpu, VCPU_REGS_RIP);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 3f9c1aa39a0a..e1b892531b35 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -913,7 +913,7 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];
#endif
- save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];
+ save->rip = svm->vcpu.arch.rip;

/* Sync some non-GPR registers before encrypting */
save->xcr0 = svm->vcpu.arch.xcr0;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 8f8bc863e214..ea28cfaf30ff 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4294,7 +4294,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)

svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
- svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+ svm->vmcb->save.rip = vcpu->arch.rip;

/*
* Disable singlestep if we're injecting an interrupt/exception.
@@ -4378,7 +4378,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
vcpu->arch.cr2 = svm->vmcb->save.cr2;
vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
- vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
+ vcpu->arch.rip = svm->vmcb->save.rip;
}
vcpu->arch.regs_dirty = 0;

@@ -4801,7 +4801,7 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)

svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
- svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+ svm->vmcb->save.rip = vcpu->arch.rip;

ret = nested_svm_simple_vmexit(svm, SVM_EXIT_SW);
if (ret)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 967b58a8ab9d..9132e53b02ae 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2641,7 +2641,7 @@ void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
break;
case VCPU_REGS_RIP:
- vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
+ vcpu->arch.rip = vmcs_readl(GUEST_RIP);
break;
case VCPU_EXREG_PDPTR:
if (enable_ept)
@@ -7646,7 +7646,7 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
- vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+ vmcs_writel(GUEST_RIP, vcpu->arch.rip);
vcpu->arch.regs_dirty = 0;

if (run_flags & KVM_RUN_LOAD_GUEST_DR6)
--
2.51.0

---
arch/x86/include/asm/kvm_host.h | 18 ++++++++++++
arch/x86/include/asm/kvm_vcpu_regs.h | 16 +++++++++++
arch/x86/kvm/Kconfig | 4 +++
arch/x86/kvm/x86.c | 43 ++++++++++++++++++++++++++++
arch/x86/kvm/x86.h | 28 +++++++++++++++++-
5 files changed, 108 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0b95126505ac..b246a1a96c4e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -220,6 +220,24 @@ enum {
VCPU_SREG_GS,
VCPU_SREG_TR,
VCPU_SREG_LDTR,
+#ifdef CONFIG_KVM_APX
+ VCPU_XREG_R16 = __VCPU_XREG_R16,
+ VCPU_XREG_R17 = __VCPU_XREG_R17,
+ VCPU_XREG_R18 = __VCPU_XREG_R18,
+ VCPU_XREG_R19 = __VCPU_XREG_R19,
+ VCPU_XREG_R20 = __VCPU_XREG_R20,
+ VCPU_XREG_R21 = __VCPU_XREG_R21,
+ VCPU_XREG_R22 = __VCPU_XREG_R22,
+ VCPU_XREG_R23 = __VCPU_XREG_R23,
+ VCPU_XREG_R24 = __VCPU_XREG_R24,
+ VCPU_XREG_R25 = __VCPU_XREG_R25,
+ VCPU_XREG_R26 = __VCPU_XREG_R26,
+ VCPU_XREG_R27 = __VCPU_XREG_R27,
+ VCPU_XREG_R28 = __VCPU_XREG_R28,
+ VCPU_XREG_R29 = __VCPU_XREG_R29,
+ VCPU_XREG_R30 = __VCPU_XREG_R30,
+ VCPU_XREG_R31 = __VCPU_XREG_R31,
+#endif
};

enum exit_fastpath_completion {
diff --git a/arch/x86/include/asm/kvm_vcpu_regs.h b/arch/x86/include/asm/kvm_vcpu_regs.h
index 1af2cb59233b..dd0cc171f405 100644
--- a/arch/x86/include/asm/kvm_vcpu_regs.h
+++ b/arch/x86/include/asm/kvm_vcpu_regs.h
@@ -20,6 +20,22 @@
#define __VCPU_REGS_R13 13
#define __VCPU_REGS_R14 14
#define __VCPU_REGS_R15 15
+#define __VCPU_XREG_R16 16
+#define __VCPU_XREG_R17 17
+#define __VCPU_XREG_R18 18
+#define __VCPU_XREG_R19 19
+#define __VCPU_XREG_R20 20
+#define __VCPU_XREG_R21 21
+#define __VCPU_XREG_R22 22
+#define __VCPU_XREG_R23 23
+#define __VCPU_XREG_R24 24
+#define __VCPU_XREG_R25 25
+#define __VCPU_XREG_R26 26
+#define __VCPU_XREG_R27 27
+#define __VCPU_XREG_R28 28
+#define __VCPU_XREG_R29 29
+#define __VCPU_XREG_R30 30
+#define __VCPU_XREG_R31 31
#endif

#endif /* _ASM_X86_KVM_VCPU_REGS_H */
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 801bf9e520db..f27e3f2937f0 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -93,10 +93,14 @@ config KVM_SW_PROTECTED_VM

If unsure, say "N".

+config KVM_APX
+ bool
+
config KVM_INTEL
tristate "KVM for Intel (and compatible) processors support"
depends on KVM && IA32_FEAT_CTL
select X86_FRED if X86_64
+ select KVM_APX if X86_64
help
Provides support for KVM on processors equipped with Intel's VT
extensions, a.k.a. Virtual Machine Extensions (VMX).
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a03530795707..07119b4597dc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1261,6 +1261,49 @@ static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
}
#endif

+#ifdef CONFIG_KVM_APX
+
+static unsigned long kvm_read_egpr(int reg)
+{
+ return 0;
+}
+
+static void kvm_write_egpr(int reg, unsigned long data)
+{
+}
+
+static unsigned long kvm_register_read_ext(struct kvm_vcpu *vcpu, int reg)
+{
+ switch (reg) {
+ case VCPU_REGS_RAX ... VCPU_REGS_R15:
+ return kvm_register_read_raw(vcpu, reg);
+ case VCPU_XREG_R16 ... VCPU_XREG_R31:
+ return kvm_read_egpr(reg);
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_register_read_ext);
+
+static void kvm_register_write_ext(struct kvm_vcpu *vcpu, int reg, unsigned long val)
+{
+ switch (reg) {
+ case VCPU_REGS_RAX ... VCPU_REGS_R15:
+ kvm_register_write_raw(vcpu, reg, val);
+ break;
+ case VCPU_XREG_R16 ... VCPU_XREG_R31:
+ kvm_write_egpr(reg, val);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_register_write_ext);
+
+#endif /* CONFIG_KVM_APX */
+
int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
u64 xcr0 = xcr;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 94d4f07aaaa0..3447790849e7 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -411,6 +411,29 @@ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
return false;
}

+#ifdef CONFIG_KVM_APX
+
+unsigned long kvm_register_read_ext(struct kvm_vcpu *vcpu, int reg);
+void kvm_register_write_ext(struct kvm_vcpu *vcpu, int reg, unsigned long val);
+
+static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
+{
+ unsigned long val = kvm_register_read_ext(vcpu, reg);
+
+ return is_64_bit_mode(vcpu) ? val : (u32)val;
+}
+
+static inline void kvm_register_write(struct kvm_vcpu *vcpu,
+ int reg, unsigned long val)
+{
+ if (!is_64_bit_mode(vcpu))
+ val = (u32)val;
+
+ return kvm_register_write_ext(vcpu, reg, val);
+}
+
+#else
+
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
unsigned long val = kvm_register_read_raw(vcpu, reg);
@@ -419,13 +442,16 @@ static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
}

static inline void kvm_register_write(struct kvm_vcpu *vcpu,
- int reg, unsigned long val)
+ int reg, unsigned long val)
{
if (!is_64_bit_mode(vcpu))
val = (u32)val;
+
return kvm_register_write_raw(vcpu, reg, val);
}

+#endif
+
static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
return !(kvm->arch.disabled_quirks & quirk);
--
2.51.0