[PATCH 4/7] x86/idle: Disable IBRS entering idle and enable it on wakeup
From: Tim Chen
Date: Thu Jan 04 2018 - 13:18:57 EST
Clear IBRS when entering idle via mwait and set it again on idle exit
back into the kernel. While in mwait the CPU is not running, but if
IBRS is left on it hurts the performance of the sibling hardware
thread. So disable IBRS before going idle and re-enable it when we
wake up.

This modifies my original implementation of IBRS on the mwait idle
path based on input from Andrea Arcangeli.
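
For clarity, the spec_ctrl.h helpers added below reduce to a
feature-guarded pair of MSR writes. The following is only a sketch of
the intent, not the literal code in the diff; ibrs_idle_enter() and
ibrs_idle_exit() are illustrative names, while the MSR, feature bit and
SPEC_CTRL_FEATURE_* constants are the ones this series defines:

#include <linux/bug.h>
#include <linux/irqflags.h>
#include <asm/cpufeature.h>
#include <asm/barrier.h>
#include <asm/msr.h>
#include <asm/spec_ctrl.h>

/* Sketch only: what unprotected_speculation_begin()/_end() amount to. */
static inline void ibrs_idle_enter(void)	/* ~ unprotected_speculation_begin() */
{
	/* Caller must have IRQs off; NMIs re-enable IBRS via the paranoid entry path. */
	WARN_ON_ONCE(!irqs_disabled());
	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
		native_wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_FEATURE_DISABLE_IBRS);
}

static inline void ibrs_idle_exit(void)		/* ~ unprotected_speculation_end() */
{
	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
		native_wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_FEATURE_ENABLE_IBRS);
	else
		rmb();	/* stop mis-speculation into this path */
}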
Signed-off-by: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
---
arch/x86/include/asm/mwait.h | 19 +++++++++++++++++++
arch/x86/include/asm/spec_ctrl.h | 37 +++++++++++++++++++++++++++++++++++++
arch/x86/kernel/process.c | 9 +++++++--
3 files changed, 63 insertions(+), 2 deletions(-)
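
For reference, the idle-side calling pattern the mwait.h and process.c
hunks below establish looks roughly like this (sketch only, not the
literal diff; idle_mwait_no_ibrs() is an illustrative name):

#include <linux/sched.h>
#include <asm/mwait.h>
#include <asm/spec_ctrl.h>

/* Sketch of the idle sequence this patch sets up; IRQs must already be off. */
static void idle_mwait_no_ibrs(unsigned long eax, unsigned long ecx)
{
	unprotected_speculation_begin();	/* IBRS off: the sibling thread can run fast */

	__monitor((void *)&current_thread_info()->flags, 0, 0);
	if (!need_resched())
		__mwait(eax, ecx);		/* idle with IBRS cleared */

	unprotected_speculation_end();		/* IBRS back on before regular kernel code runs */
}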
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 39a2fb2..5cb3bff 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -6,6 +6,7 @@
#include <linux/sched/idle.h>
#include <asm/cpufeature.h>
+#include <asm/spec_ctrl.h>
#define MWAIT_SUBSTATE_MASK 0xf
#define MWAIT_CSTATE_MASK 0xf
@@ -100,15 +101,33 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
+ bool can_toggle_ibrs = false;
if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
mb();
clflush((void *)&current_thread_info()->flags);
mb();
}
+ if (irqs_disabled()) {
+ /*
+ * CPUs run faster with speculation protection
+ * disabled. All CPU threads in a core must
+ * disable speculation protection for it to be
+ * disabled. Disable it while we are idle so the
+ * other hyperthread can run fast.
+ *
+ * NMI uses the save_paranoid model, which
+ * always enables IBRS on exception entry
+ * before any indirect jump can run.
+ */
+ can_toggle_ibrs = true;
+ unprotected_speculation_begin();
+ }
__monitor((void *)&current_thread_info()->flags, 0, 0);
if (!need_resched())
__mwait(eax, ecx);
+ if (can_toggle_ibrs)
+ unprotected_speculation_end();
}
current_clr_polling();
}
diff --git a/arch/x86/include/asm/spec_ctrl.h b/arch/x86/include/asm/spec_ctrl.h
index 16fc4f58..28b0314 100644
--- a/arch/x86/include/asm/spec_ctrl.h
+++ b/arch/x86/include/asm/spec_ctrl.h
@@ -76,5 +76,42 @@
10:
.endm
+#else
+#include <asm/microcode.h>
+
+static inline void __disable_indirect_speculation(void)
+{
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_FEATURE_ENABLE_IBRS);
+}
+
+static inline void __enable_indirect_speculation(void)
+{
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_FEATURE_DISABLE_IBRS);
+}
+
+/*
+ * Interrupts must be disabled to begin unprotected speculation.
+ * Otherwise an interrupt could run with speculation unprotected.
+ */
+static inline void unprotected_speculation_begin(void)
+{
+ WARN_ON_ONCE(!irqs_disabled());
+ if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+ __enable_indirect_speculation();
+}
+
+static inline void unprotected_speculation_end(void)
+{
+ if (boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+ __disable_indirect_speculation();
+ else
+ /*
+ * If we intended to disable indirect speculation
+ * but ended up here due to mis-speculation, we
+ * need to stop the mis-speculation with an rmb().
+ */
+ rmb();
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SPEC_CTRL_H */
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 5174159..cb14820 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -39,6 +39,7 @@
#include <asm/switch_to.h>
#include <asm/desc.h>
#include <asm/prctl.h>
+#include <asm/spec_ctrl.h>
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -461,11 +462,15 @@ static __cpuidle void mwait_idle(void)
mb(); /* quirk */
}
+ unprotected_speculation_begin();
__monitor((void *)&current_thread_info()->flags, 0, 0);
- if (!need_resched())
+ if (!need_resched()) {
__sti_mwait(0, 0);
- else
+ unprotected_speculation_end();
+ } else {
+ unprotected_speculation_end();
local_irq_enable();
+ }
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
} else {
local_irq_enable();
--
2.9.4