[PATCH 2/2] KVM: VMX: Handle preemption timer fastpath

From: Wanpeng Li
Date: Tue Apr 21 2020 - 07:20:50 EST


From: Wanpeng Li <wanpengli@xxxxxxxxxxx>

This patch implements a fastpath for handling preemption timer vmexits: when
the exit fires because the VMX-preemption timer has counted down to zero,
handle it as soon as possible and re-enter the guest immediately, without
going through the full KVM exit handling, whenever it is safe to do so.
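
For context, a minimal, self-contained sketch (not part of this patch; the
types and functions below are toy stand-ins, not the real KVM code) of the
control flow the fastpath relies on: when the handler returns
EXIT_FASTPATH_CONT_RUN the loop re-enters the guest right away, while
EXIT_FASTPATH_NONE drops back to the full exit handler.

	/*
	 * Illustrative user-space sketch only.  It models the idea of a
	 * fastpath return value short-circuiting the run loop; it does not
	 * reproduce the actual KVM vcpu_run/vmx_vcpu_run code.
	 */
	#include <stdio.h>

	enum exit_fastpath_completion {
		EXIT_FASTPATH_NONE,
		EXIT_FASTPATH_CONT_RUN,
	};

	struct toy_vcpu {
		int fast_exits;	/* exits resolved without leaving the loop */
		int slow_exits;	/* exits that needed full handling */
	};

	/* Toy stand-in for vmx_exit_handlers_fastpath(): pretend the first
	 * few exits are preemption timer exits the fastpath fully handles. */
	static enum exit_fastpath_completion toy_fastpath(int exit_reason)
	{
		return exit_reason < 3 ? EXIT_FASTPATH_CONT_RUN
				       : EXIT_FASTPATH_NONE;
	}

	int main(void)
	{
		struct toy_vcpu vcpu = { 0, 0 };

		for (int exit_reason = 0; exit_reason < 5; exit_reason++) {
			if (toy_fastpath(exit_reason) == EXIT_FASTPATH_CONT_RUN) {
				vcpu.fast_exits++;	/* "vmentry" again right away */
				continue;
			}
			vcpu.slow_exits++;	/* fall back to full exit handling */
		}

		printf("fast: %d, slow: %d\n", vcpu.fast_exits, vcpu.slow_exits);
		return 0;
	}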

Tested on an SKX server.

cyclictest in guest (w/o mwait exposed, adaptive lapic timer advance at the default -1):

5632.75ns -> 4559.25ns, 19%

kvm-unit-test/vmexit.flat:

w/o APICv, w/o advance timer:
tscdeadline_immed: 4780.75 -> 3851 19.4%
tscdeadline: 7474 -> 6528.5 12.7%

w/o APICv, w/ adaptive advance timer default -1:
tscdeadline_immed: 4845.75 -> 3930.5 18.9%
tscdeadline: 6048 -> 5871.75 3%

w/ APICv, w/o advance timer:
tscdeadline_immed: 2919 -> 2467.75 15.5%
tscdeadline: 5661.75 -> 5188.25 8.4%

w/ APICv, w/ adaptive advance timer default -1:
tscdeadline_immed: 3018.5 -> 2561 15.2%
tscdeadline: 4663.75 -> 4626.5 1%
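
(The percentages are the relative reduction over the baseline, e.g. for the
first tscdeadline_immed row: (4780.75 - 3851) / 4780.75 ~= 19.4%.)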

Tested-by: Haiwei Li <lihaiwei@xxxxxxxxxxx>
Cc: Haiwei Li <lihaiwei@xxxxxxxxxxx>
Signed-off-by: Wanpeng Li <wanpengli@xxxxxxxxxxx>
---
arch/x86/kvm/vmx/vmx.c | 41 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 41 insertions(+)

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 7688e40..623c4a0 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6593,12 +6593,53 @@ static void vmx_fast_deliver_interrupt(struct kvm_vcpu *vcpu)
}
}

+static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu);
+
+static enum exit_fastpath_completion handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_lapic *apic = vcpu->arch.apic;
+ struct kvm_timer *ktimer = &apic->lapic_timer;
+
+ if (vmx_event_needs_reinjection(vcpu))
+ return EXIT_FASTPATH_NONE;
+
+ if (!vmx->req_immediate_exit &&
+ !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) {
+ if (!vmx_interrupt_allowed(vcpu) ||
+ !apic_lvtt_tscdeadline(apic) ||
+ vmx->rmode.vm86_active ||
+ is_smm(vcpu) ||
+ !kvm_apic_hw_enabled(apic))
+ return EXIT_FASTPATH_NONE;
+
+ if (!apic->lapic_timer.hv_timer_in_use)
+ return EXIT_FASTPATH_CONT_RUN;
+
+ WARN_ON(swait_active(&vcpu->wq));
+ vmx_cancel_hv_timer(vcpu);
+ apic->lapic_timer.hv_timer_in_use = false;
+
+ if (atomic_read(&apic->lapic_timer.pending))
+ return EXIT_FASTPATH_CONT_RUN;
+
+ ktimer->expired_tscdeadline = ktimer->tscdeadline;
+ vmx_fast_deliver_interrupt(vcpu);
+ ktimer->tscdeadline = 0;
+ return EXIT_FASTPATH_CONT_RUN;
+ }
+
+ return EXIT_FASTPATH_NONE;
+}
+
static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
if (!is_guest_mode(vcpu)) {
switch(to_vmx(vcpu)->exit_reason) {
case EXIT_REASON_MSR_WRITE:
return handle_fastpath_set_msr_irqoff(vcpu);
+ case EXIT_REASON_PREEMPTION_TIMER:
+ return handle_fastpath_preemption_timer(vcpu);
default:
return EXIT_FASTPATH_NONE;
}
--
2.7.4