[patch V4 part 2 18/18] x86/kvm/svm: Move guest enter/exit into .noinstr.text
From: Thomas Gleixner
Date: Tue May 05 2020 - 10:14:57 EST
Move the functions which are inside the RCU off region into the
non-instrumentable text section.
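For reference, the underlying mechanism is plain section placement via
function attributes. A minimal user-space sketch of the idea, assuming
GCC/Clang on an ELF target (illustrative only; the kernel's real noinstr
macro in include/linux/compiler_types.h additionally disables tracing and
sanitizer instrumentation):

  /*
   * Sketch of a noinstr-like attribute: keep the function out of the
   * regular .text section and keep profiling hooks out of it.
   */
  #define my_noinstr __attribute__((noinline, no_instrument_function, \
                                     __section__(".noinstr.text")))

  static my_noinstr void critical_transition(void)
  {
          /* Must not call into instrumentable code from here. */
  }

  int main(void)
  {
          critical_transition();
          return 0;
  }

Inspecting the resulting binary with 'objdump -h' or 'readelf -S' shows the
separate .noinstr.text section, which tooling such as objtool can then check
separately from regular text.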
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
---
arch/x86/kvm/svm/svm.c | 102 ++++++++++++++++++++++++---------------------
arch/x86/kvm/svm/vmenter.S | 2 +-
2 files changed, 57 insertions(+), 47 deletions(-)
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3278,6 +3278,61 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
+static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
+ struct vcpu_svm *svm)
+{
+ /*
+ * VMENTER enables interrupts (host state), but the kernel state is
+ * interrupts disabled when this is invoked. Also tell RCU about
+ * it. This is the same logic as for exit_to_user_mode().
+ *
+ * 1) Trace interrupts on state
+ * 2) Prepare lockdep with RCU on
+ * 3) Invoke context tracking if enabled to adjust RCU state
+ * 4) Tell lockdep that interrupts are enabled
+ *
+ * This has to be after x86_spec_ctrl_set_guest() because that can
+ * take locks (lockdep needs RCU) and calls into other instrumentable
+ * code.
+ */
+ instr_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ instr_end();
+ guest_enter_irqoff();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+
+ __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
+
+#ifdef CONFIG_X86_64
+ native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+#else
+ loadsegment(fs, svm->host.fs);
+#ifndef CONFIG_X86_32_LAZY_GS
+ loadsegment(gs, svm->host.gs);
+#endif
+#endif
+
+ /*
+ * VMEXIT disables interrupts (host state, see the CLI in the ASM
+ * above), but tracing and lockdep have them in state 'on'. Same as
+ * enter_from_user_mode().
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ *
+ * This needs to be done before the code below, as native_read_msr()
+ * contains a tracepoint and x86_spec_ctrl_restore_host() calls into
+ * other instrumentable code.
+ */
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ guest_exit_irqoff();
+ instr_begin();
+ trace_hardirqs_off_prepare();
+ instr_end();
+}
+
static void svm_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3330,52 +3385,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
*/
x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
- /*
- * VMENTER enables interrupts (host state), but the kernel state is
- * interrupts disabled when this is invoked. Also tell RCU about
- * it. This is the same logic as for exit_to_user_mode().
- *
- * 1) Trace interrupts on state
- * 2) Prepare lockdep with RCU on
- * 3) Invoke context tracking if enabled to adjust RCU state
- * 4) Tell lockdep that interrupts are enabled
- *
- * This has to be after x86_spec_ctrl_set_guest() because that can
- * take locks (lockdep needs RCU) and calls into world and some
- * more.
- */
- trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
- guest_enter_irqoff();
- lockdep_hardirqs_on(CALLER_ADDR0);
-
- __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
-
-#ifdef CONFIG_X86_64
- wrmsrl(MSR_GS_BASE, svm->host.gs_base);
-#else
- loadsegment(fs, svm->host.fs);
-#ifndef CONFIG_X86_32_LAZY_GS
- loadsegment(gs, svm->host.gs);
-#endif
-#endif
-
- /*
- * VMEXIT disables interrupts (host state, see the CLI in the ASM
- * above), but tracing and lockdep have them in state 'on'. Same as
- * enter_from_user_mode().
- *
- * 1) Tell lockdep that interrupts are disabled
- * 2) Invoke context tracking if enabled to reactivate RCU
- * 3) Trace interrupts off state
- *
- * This needs to be done before the below as native_read_msr()
- * contains a tracepoint and x86_spec_ctrl_restore_host() calls
- * into world and some more.
- */
- lockdep_hardirqs_off(CALLER_ADDR0);
- guest_exit_irqoff();
- trace_hardirqs_off_prepare();
+ svm_vcpu_enter_exit(vcpu, svm);
/*
* We do not use IBRS in the kernel. If this vCPU has used the
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -27,7 +27,7 @@
#define VCPU_R15 __VCPU_REGS_R15 * WORD_SIZE
#endif
- .text
+.section .noinstr.text, "ax"
/**
* __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode