[PATCH] KVM: nVMX: Do not dynamically set VMX instruction exit handlers

From: Sean Christopherson
Date: Mon Sep 23 2019 - 16:19:43 EST


Handle VMX instructions via a dedicated function and a switch statement
provided by the nVMX code, instead of overwriting entries in
kvm_vmx_exit_handlers when nested support is enabled. This will allow a
future patch to make kvm_vmx_exit_handlers const, which in turn allows
for better compiler optimizations, e.g. direct calls instead of
retpolined indirect calls.

Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
---
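Not for the changelog: below is a minimal, self-contained sketch of the
dispatch pattern this patch moves toward, for reviewers who want to see
it in isolation. It is plain userspace C, not kernel code; the fake_vcpu
struct, the EXIT_* values and the do_*() handlers are made up purely for
illustration. Every "VMX instruction" slot of a const handler table
points at a single dispatcher, and a switch statement does the actual
routing, so nothing has to be patched into the table at runtime.

#include <stdio.h>

/* Hypothetical exit reasons, only for this sketch. */
enum { EXIT_HLT, EXIT_VMCLEAR, EXIT_VMLAUNCH, NR_EXIT_REASONS };

struct fake_vcpu {
	int exit_reason;
};

static int do_hlt(struct fake_vcpu *vcpu)
{
	(void)vcpu;
	printf("HLT exit\n");
	return 1;
}

static int do_vmclear(struct fake_vcpu *vcpu)
{
	(void)vcpu;
	printf("VMCLEAR exit\n");
	return 1;
}

static int do_vmlaunch(struct fake_vcpu *vcpu)
{
	(void)vcpu;
	printf("VMLAUNCH exit\n");
	return 1;
}

/* One entry point for all VMX instructions; the switch does the routing. */
static int do_vmx_instruction(struct fake_vcpu *vcpu)
{
	switch (vcpu->exit_reason) {
	case EXIT_VMCLEAR:
		return do_vmclear(vcpu);
	case EXIT_VMLAUNCH:
		return do_vmlaunch(vcpu);
	}
	return 0;	/* unexpected exit reason */
}

/*
 * No slot is rewritten at runtime, so the table can be const and the
 * per-instruction handlers are reached via direct calls from the switch.
 */
static int (* const exit_handlers[NR_EXIT_REASONS])(struct fake_vcpu *) = {
	[EXIT_HLT]	= do_hlt,
	[EXIT_VMCLEAR]	= do_vmx_instruction,
	[EXIT_VMLAUNCH]	= do_vmx_instruction,
};

int main(void)
{
	struct fake_vcpu vcpu = { .exit_reason = EXIT_VMCLEAR };

	return exit_handlers[vcpu.exit_reason](&vcpu) == 1 ? 0 : 1;
}

In the real code, when nested is disabled, handle_vmx_instruction()
instead injects #UD, as in the vmx.c hunk below.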
 arch/x86/kvm/vmx/nested.c | 52 ++++++++++++++++++++++++++++-----------
 arch/x86/kvm/vmx/nested.h |  3 ++-
 arch/x86/kvm/vmx/vmx.c    |  5 +++-
 3 files changed, 44 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 6ce83c602e7f..41c7fcf28ab6 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -5072,6 +5072,43 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+int nested_vmx_handle_vmx_instruction(struct kvm_vcpu *vcpu)
+{
+	switch (to_vmx(vcpu)->exit_reason) {
+	case EXIT_REASON_VMCLEAR:
+		return handle_vmclear(vcpu);
+	case EXIT_REASON_VMLAUNCH:
+		return handle_vmlaunch(vcpu);
+	case EXIT_REASON_VMPTRLD:
+		return handle_vmptrld(vcpu);
+	case EXIT_REASON_VMPTRST:
+		return handle_vmptrst(vcpu);
+	case EXIT_REASON_VMREAD:
+		return handle_vmread(vcpu);
+	case EXIT_REASON_VMRESUME:
+		return handle_vmresume(vcpu);
+	case EXIT_REASON_VMWRITE:
+		return handle_vmwrite(vcpu);
+	case EXIT_REASON_VMOFF:
+		return handle_vmoff(vcpu);
+	case EXIT_REASON_VMON:
+		return handle_vmon(vcpu);
+	case EXIT_REASON_INVEPT:
+		return handle_invept(vcpu);
+	case EXIT_REASON_INVVPID:
+		return handle_invvpid(vcpu);
+	case EXIT_REASON_VMFUNC:
+		return handle_vmfunc(vcpu);
+	}
+
+	WARN_ON_ONCE(1);
+	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+	vcpu->run->internal.suberror =
+			KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
+	vcpu->run->internal.ndata = 1;
+	vcpu->run->internal.data[0] = to_vmx(vcpu)->exit_reason;
+	return 0;
+}
 
 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
 				       struct vmcs12 *vmcs12)
@@ -5972,7 +6009,7 @@ void nested_vmx_hardware_unsetup(void)
 	}
 }
 
-__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
+__init int nested_vmx_hardware_setup(void)
 {
 	int i;
 
@@ -5995,19 +6032,6 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
 		init_vmcs_shadow_fields();
 	}
 
-	exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear,
-	exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
-	exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld,
-	exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst,
-	exit_handlers[EXIT_REASON_VMREAD] = handle_vmread,
-	exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume,
-	exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite,
-	exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff,
-	exit_handlers[EXIT_REASON_VMON] = handle_vmon,
-	exit_handlers[EXIT_REASON_INVEPT] = handle_invept,
-	exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid,
-	exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc,
-
 	kvm_x86_ops->check_nested_events = vmx_check_nested_events;
 	kvm_x86_ops->get_nested_state = vmx_get_nested_state;
 	kvm_x86_ops->set_nested_state = vmx_set_nested_state;
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index 187d39bf0bf1..0da48c83cccf 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -10,9 +10,10 @@ void vmx_leave_nested(struct kvm_vcpu *vcpu);
 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
 				bool apicv);
 void nested_vmx_hardware_unsetup(void);
-__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
+__init int nested_vmx_hardware_setup(void);
 void nested_vmx_vcpu_setup(void);
 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
+int nested_vmx_handle_vmx_instruction(struct kvm_vcpu *vcpu);
 int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry);
 bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason);
 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 73bf9a2e6fb6..229b3a5e0695 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5459,6 +5459,9 @@ static int handle_preemption_timer(struct kvm_vcpu *vcpu)
  */
 static int handle_vmx_instruction(struct kvm_vcpu *vcpu)
 {
+	if (nested)
+		return nested_vmx_handle_vmx_instruction(vcpu);
+
 	kvm_queue_exception(vcpu, UD_VECTOR);
 	return 1;
 }
@@ -7631,7 +7634,7 @@ static __init int hardware_setup(void)
 		nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
 					   vmx_capability.ept, enable_apicv);
 
-		r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
+		r = nested_vmx_hardware_setup();
 		if (r)
 			return r;
 	}
--
2.22.0

