[PATCH 1/2] KVM: X86: Single target IPI fastpath
From: Wanpeng Li <wanpengli@xxxxxxxxxxx>
Date: Sat Nov 09 2019 - 02:06:14 EST
This patch optimizes the single-target IPI path for x2APIC physical
destination mode with fixed delivery mode: the IPI is delivered to the
receiver immediately after the sender's ICR write vmexit, skipping various
slow-path checks when possible.
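As background for the check below: in x2APIC mode the ICR is a single 64-bit
MSR, and at the WRMSR vmexit the sender's ECX names the register while
EDX:EAX carry the destination, delivery mode and vector. A minimal standalone
sketch of the test (illustration only, not the kernel code; the constants
follow the x2APIC layout, where APIC register offset R is reached via MSR
0x800 + R/16 and the ICR sits at offset 0x300):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define X2APIC_MSR_BASE		0x800u		/* APIC offset 0x0 <-> MSR 0x800 */
#define ICR_OFFSET		0x300u		/* so the ICR is MSR 0x830 */
#define ICR_DEST_MODE		(1u << 11)	/* 0 = physical, 1 = logical */
#define ICR_DELIVERY_MODE	(7u << 8)	/* 000 = fixed */

static bool is_fastpath_ipi(uint32_t msr, uint64_t icr)
{
	return ((msr - X2APIC_MSR_BASE) << 4) == ICR_OFFSET &&
	       (icr & ICR_DEST_MODE) == 0 &&	/* physical destination */
	       (icr & ICR_DELIVERY_MODE) == 0;	/* fixed delivery */
}

int main(void)
{
	/* vector 0xfd (Linux's reschedule vector) to x2APIC ID 3;
	 * in x2APIC mode ICR bits 63:32 hold the destination ID */
	uint64_t icr = ((uint64_t)3 << 32) | 0xfd;

	printf("%d\n", is_fastpath_ipi(0x830, icr));	/* prints 1 */
	return 0;
}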
Testing on a Xeon Skylake server:

The virtual IPI latency from sender send to receiver receive is reduced by
more than 330 CPU cycles.

Running hackbench (reschedule IPIs) in the guest, the average handling time
of an MSR_WRITE-induced vmexit drops by more than 1000 CPU cycles (the
~0.50us per-exit reduction shown below corresponds to 1000+ cycles at a
2GHz+ clock):
Before patch:

VM-EXIT    Samples  Samples%  Time%   Min Time  Max Time   Avg time
MSR_WRITE  5417390  90.01%    16.31%  0.69us    159.60us   1.08us

After patch:

VM-EXIT    Samples  Samples%  Time%   Min Time  Max Time   Avg time
MSR_WRITE  6726109  90.73%    62.18%  0.48us    191.27us   0.58us
Signed-off-by: Wanpeng Li <wanpengli@xxxxxxxxxxx>
---
arch/x86/kvm/vmx/vmx.c | 39 +++++++++++++++++++++++++++++++++++++--
include/linux/kvm_host.h | 1 +
2 files changed, 38 insertions(+), 2 deletions(-)
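Review note (not part of the commit message): the handoff between
vmx_vcpu_run() and vmx_handle_exit() in the diff below can be condensed as
in the following sketch, a simplified standalone model with invented names,
not the kernel code:

#include <stdbool.h>

enum { EXIT_REASON_MSR_WRITE = 32 };	/* VMX basic exit reason for WRMSR */

struct vcpu {
	unsigned int exit_reason;
	bool fast_vmexit;	/* set when the fastpath already handled the exit */
};

/* stand-in for handle_ipi_fastpath(): decode ECX/EDX:EAX, deliver the
 * IPI, skip the emulated WRMSR; returns true when fully handled */
static bool try_ipi_fastpath(struct vcpu *v)
{
	return true;
}

static void vcpu_run(struct vcpu *v)
{
	/* ...VMRESUME, then read the exit reason from the VMCS... */
	v->fast_vmexit = false;
	if (v->exit_reason == EXIT_REASON_MSR_WRITE)
		v->fast_vmexit = try_ipi_fastpath(v);
}

static int handle_exit(struct vcpu *v)
{
	if (v->fast_vmexit)
		return 1;	/* already handled; re-enter the guest */
	/* ...otherwise dispatch through kvm_vmx_exit_handlers[]... */
	return 1;
}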
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 5d21a4a..5c67061 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5924,7 +5924,9 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	if (exit_reason < kvm_vmx_max_exit_handlers
+	if (vcpu->fast_vmexit)
+		return 1;
+	else if (exit_reason < kvm_vmx_max_exit_handlers
 	    && kvm_vmx_exit_handlers[exit_reason])
 		return kvm_vmx_exit_handlers[exit_reason](vcpu);
 	else {
@@ -6474,6 +6476,34 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
 bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
 
+static int handle_ipi_fastpath(struct kvm_vcpu *vcpu)
+{
+	u32 index;
+	u64 data;
+	int ret = 0;
+
+	if (lapic_in_kernel(vcpu) && apic_x2apic_mode(vcpu->arch.apic)) {
+		/*
+		 * Fastpath: deliver the single-target IPI to the receiver.
+		 */
+		index = kvm_rcx_read(vcpu);
+		data = kvm_read_edx_eax(vcpu);
+
+		if (((index - APIC_BASE_MSR) << 4 == APIC_ICR) &&
+		    ((data & KVM_APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
+		    ((data & APIC_MODE_MASK) == APIC_DM_FIXED)) {
+
+			kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR2, (u32)(data >> 32));
+			ret = kvm_lapic_reg_write(vcpu->arch.apic, APIC_ICR, (u32)data);
+
+			if (ret == 0)
+				ret = kvm_skip_emulated_instruction(vcpu);
+		}
+	}
+
+	return ret;
+}
+
 static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6615,6 +6645,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 				  | (1 << VCPU_EXREG_CR3));
 	vcpu->arch.regs_dirty = 0;
 
+	vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON);
+	vcpu->fast_vmexit = false;
+	if (!is_guest_mode(vcpu) &&
+	    vmx->exit_reason == EXIT_REASON_MSR_WRITE)
+		vcpu->fast_vmexit = handle_ipi_fastpath(vcpu);
+
 	pt_guest_exit(vmx);
 
 	/*
@@ -6634,7 +6670,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx->nested.nested_run_pending = 0;
 	vmx->idt_vectoring_info = 0;
 
-	vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON);
 
 	if ((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
 		kvm_machine_check();
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 719fc3e..7a7358b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -319,6 +319,7 @@ struct kvm_vcpu {
 #endif
 	bool preempted;
 	bool ready;
+	bool fast_vmexit;
 	struct kvm_vcpu_arch arch;
 	struct dentry *debugfs_dentry;
 };
--
2.7.4