[PATCH v3 3/7] KVM: SVM: Move RAX legality check to SVM insn interception handlers
From: Yosry Ahmed
Date: Thu Mar 12 2026 - 20:11:12 EST
When #GP is intercepted by KVM, the #GP interception handler checks
whether the GPA in RAX is legal and reinjects the #GP accordingly.
Otherwise, it calls into the appropriate interception handler for
VMRUN/VMLOAD/VMSAVE. The intercept handlers do not check RAX.
However, according to the APM, the interception takes precedence
over #GP due to an invalid operand:
Generally, instruction intercepts are checked after simple exceptions
(such as #GP—when CPL is incorrect—or #UD) have been checked, but
before exceptions related to memory accesses (such as page faults) and
exceptions based on specific operand values.
Move the check into the interception handlers for VMRUN/VMLOAD/VMSAVE as
the CPU does not check RAX before the interception.
Opportunistically make the non-SVM insn path in gp_interception() do an
early return to reduce indentation.
Signed-off-by: Yosry Ahmed <yosry@xxxxxxxxxx>
---
arch/x86/kvm/svm/nested.c | 5 +++++
arch/x86/kvm/svm/svm.c | 34 +++++++++++++++++-----------------
2 files changed, 22 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 5ff01d2ac85e4..016bf88ec2def 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1115,6 +1115,11 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
vmcb12_gpa = svm->vmcb->save.rax;
+ if (!page_address_valid(vcpu, vmcb12_gpa)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
ret = nested_svm_copy_vmcb12_to_cache(vcpu, vmcb12_gpa);
if (ret) {
if (ret == -EFAULT) {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 796a6887305d6..f019a3f7705ae 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2183,6 +2183,7 @@ static int intr_interception(struct kvm_vcpu *vcpu)
static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ u64 vmcb12_gpa = svm->vmcb->save.rax;
struct vmcb *vmcb12;
struct kvm_host_map map;
int ret;
@@ -2190,7 +2191,12 @@ static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
if (nested_svm_check_permissions(vcpu))
return 1;
- ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
+ if (!page_address_valid(vcpu, vmcb12_gpa)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
if (ret) {
if (ret == -EINVAL)
kvm_inject_gp(vcpu, 0);
@@ -2306,24 +2312,18 @@ static int gp_interception(struct kvm_vcpu *vcpu)
goto reinject;
opcode = svm_instr_opcode(vcpu);
+ if (opcode != NONE_SVM_INSTR)
+ return emulate_svm_instr(vcpu, opcode);
- if (opcode == NONE_SVM_INSTR) {
- if (!enable_vmware_backdoor)
- goto reinject;
-
- /*
- * VMware backdoor emulation on #GP interception only handles
- * IN{S}, OUT{S}, and RDPMC.
- */
- if (!is_guest_mode(vcpu))
- return kvm_emulate_instruction(vcpu,
- EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
- } else {
- if (!page_address_valid(vcpu, svm->vmcb->save.rax))
- goto reinject;
+ if (!enable_vmware_backdoor)
+ goto reinject;
- return emulate_svm_instr(vcpu, opcode);
- }
+ /*
+ * VMware backdoor emulation on #GP interception only handles
+ * IN{S}, OUT{S}, and RDPMC.
+ */
+ if (!is_guest_mode(vcpu))
+ return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
reinject:
kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
--
2.53.0.851.ga537e3e6e9-goog