[PATCH v2 3/5] KVM: nSVM: Move sync'ing to vmcb12 cache after completing interrupts
From: Yosry Ahmed
Date: Wed Feb 11 2026 - 11:31:00 EST
nested_sync_control_from_vmcb02() syncs some fields from vmcb02 to the
cached vmcb12 after a VMRUN of L2, mainly to keep the cache up-to-date
for save/restore. However, NextRIP is synced separately after
completing interrupts, as svm_complete_soft_interrupt() may update it
(e.g. for soft IRQ re-injection).
Move the call to nested_sync_control_from_vmcb02() after completing
interrupts, moving the NextRIP sync (and the FIXME) inside it. This
keeps the sync code together, and puts the FIXME in a more appropriate
location, as it applies to most/all fields synced by
nested_sync_control_from_vmcb02().
Moving the call is safe, as nothing in-between accesses any of the VMCB
fields synced by nested_sync_control_from_vmcb02(), except NextRIP.
Opportunistically make some whitespace fixes. No functional change
intended.
Signed-off-by: Yosry Ahmed <yosry.ahmed@xxxxxxxxx>
---
arch/x86/kvm/svm/nested.c | 10 ++++++++--
arch/x86/kvm/svm/svm.c | 26 ++++++++++----------------
2 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 9909ff237e5c..6a7c7c5b742a 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -519,9 +519,15 @@ void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
u32 mask;
- svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
- svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
+
+ /*
+ * FIXME: Rework svm_get_nested_state() to not pull data from the
+ * cache (except for maybe int_ctl).
+ */
+ svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
+ svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
svm->nested.ctl.int_state = svm->vmcb->control.int_state;
+ svm->nested.ctl.next_rip = svm->vmcb->control.next_rip;
/* Only a few fields of int_ctl are written by the processor. */
mask = V_IRQ_MASK | V_TPR_MASK;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 1073a32a96fa..458abead9d5b 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4399,17 +4399,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
sync_cr8_to_lapic(vcpu);
svm->next_rip = 0;
- if (is_guest_mode(vcpu)) {
- nested_sync_control_from_vmcb02(svm);
-
- /* Track VMRUNs that have made past consistency checking */
- if (svm->nested.nested_run_pending &&
- !svm_is_vmrun_failure(svm->vmcb->control.exit_code))
- ++vcpu->stat.nested_run;
-
- svm->nested.nested_run_pending = 0;
- }
-
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
/*
@@ -4438,12 +4427,17 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
/*
* Update the cache after completing interrupts to get an accurate
* NextRIP, e.g. when re-injecting a soft interrupt.
- *
- * FIXME: Rework svm_get_nested_state() to not pull data from the
- * cache (except for maybe int_ctl).
*/
- if (is_guest_mode(vcpu))
- svm->nested.ctl.next_rip = svm->vmcb->control.next_rip;
+ if (is_guest_mode(vcpu)) {
+ nested_sync_control_from_vmcb02(svm);
+
+ /* Track VMRUNs that have made past consistency checking */
+ if (svm->nested.nested_run_pending &&
+ !svm_is_vmrun_failure(svm->vmcb->control.exit_code))
+ ++vcpu->stat.nested_run;
+
+ svm->nested.nested_run_pending = 0;
+ }
return svm_exit_handlers_fastpath(vcpu);
}
--
2.53.0.239.g8d8fc8a987-goog