[PATCH] [v2] x86, mpx: Workaround for MPX erratum
From: Dave Hansen
Date: Wed May 04 2016 - 16:54:06 EST
Changes from v1:
* Unconditionally enable the workaround on all CPUs with MPX,
regardless of whether we know them to be affected or not
* Add a pr_warn() when the workaround is active
--
This erratum essentially causes the CPU to forget which privilege
mode it is operating in (kernel vs. user) for the purposes of MPX.
This erratum can only be triggered when a system is not using
Supervisor Mode Execution Prevention (SMEP). Our workaround for
the erratum is to ensure that MPX can only be used in cases where
SMEP is present in the processor and enabled.
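For reference, both features are enumerated in CPUID leaf 7,
sub-leaf 0: SMEP is EBX bit 7 and MPX is EBX bit 14. A minimal
user-space sketch (illustration only, not part of the patch) that
prints what the CPU advertises; note that whether CR4.SMEP is
actually *enabled* is kernel-internal state that CPUID does not
expose:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Bail out if this CPU does not implement CPUID leaf 7. */
	if (__get_cpuid_max(0, NULL) < 7)
		return 1;

	__cpuid_count(7, 0, eax, ebx, ecx, edx);

	printf("SMEP: %s\n", (ebx & (1U <<  7)) ? "yes" : "no");
	printf("MPX : %s\n", (ebx & (1U << 14)) ? "yes" : "no");
	return 0;
}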
This erratum only affects Core processors; Atom is unaffected.
But, there is no architectural way to distinguish Atom from Core,
so we just apply this workaround to all processors with MPX. It's
possible that it will mistakenly disable MPX on some unaffected
Atom processors or future unaffected Core processors. There are
currently no processors that have MPX and not SMEP. It would
take something akin to a hypervisor masking SMEP out on an Atom
processor for this to present itself on current hardware.
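Just in case the detection does go bad on such a system, the
patch provides a boot-time override (see the intel.c hunk below)
that skips the workaround and leaves MPX enabled, via this kernel
command-line option:

	intel-skd-046-workaround=disable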
More details:
http://www.intel.com/content/dam/www/public/us/en/documents/specification-updates/desktop-6th-gen-core-family-spec-update.pdf
SKD046 Branch Instructions May Initialize MPX Bound Registers Incorrectly
Problem:
Depending on the current Intel MPX (Memory Protection
Extensions) configuration, execution of certain branch
instructions (near CALL, near RET, near JMP, and Jcc
instructions) without a BND prefix (F2H) initializes the MPX bound
registers. Due to this erratum, such a branch instruction that is
executed both with CPL = 3 and with CPL < 3 may not use the
correct MPX configuration register (BNDCFGU or BNDCFGS,
respectively) for determining whether to initialize the bound
registers; it may thus initialize the bound registers when it
should not, or fail to initialize them when it should.
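To make "without a BND prefix" concrete (illustration only, not
part of the erratum text, and assuming an assembler that knows
the 'bnd' mnemonic, binutils 2.24+): with MPX enabled and
BNDPRESERVE clear, the legacy encoding of a branch INITs
BND0-BND3, while the F2H-prefixed encoding preserves them:

/* Illustration: the same near JMP with and without the BND prefix. */
void bnd_prefix_demo(void)
{
	/* Legacy encoding (no F2H prefix): INITs the bound registers. */
	asm volatile("jmp 1f\n1:");

	/* BND-prefixed encoding (F2H): preserves the bound registers. */
	asm volatile("bnd jmp 1f\n1:");
}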
Implication:
A branch instruction that has executed both in user mode and in
supervisor mode (from the same linear address) may cause a #BR
(bound range fault) when it should not have or may not cause a
#BR when it should have.
Workaround:
An operating system can avoid this erratum by setting
CR4.SMEP[bit 20] to enable supervisor-mode execution prevention
(SMEP). When SMEP is enabled, no code can be executed both with
CPL = 3 and with CPL < 3.
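At the register level the workaround is just the existing
setup_smep() path, which calls cr4_set_bits(X86_CR4_SMEP)
whenever X86_FEATURE_SMEP is present. A freestanding sketch of
the same operation (ring 0 only; CR4 is not accessible from user
space):

#define X86_CR4_SMEP (1UL << 20)	/* same value the kernel uses */

static inline void enable_smep(void)
{
	unsigned long cr4;

	asm volatile("mov %%cr4, %0" : "=r" (cr4));
	cr4 |= X86_CR4_SMEP;
	asm volatile("mov %0, %%cr4" : : "r" (cr4));
}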
Cc: x86@xxxxxxxxxx
Cc: luto@xxxxxxxxxxxxxx
Cc: torvalds@xxxxxxxxxxxxxxxxxxxx
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
---
 b/arch/x86/include/asm/bugs.h  |    6 ++++++
 b/arch/x86/kernel/cpu/common.c |    3 +++
 b/arch/x86/kernel/cpu/intel.c  |   37 +++++++++++++++++++++++++++++++++++++
 3 files changed, 46 insertions(+)
diff -puN arch/x86/include/asm/bugs.h~neversend-skl-mpx-errata-simple arch/x86/include/asm/bugs.h
--- a/arch/x86/include/asm/bugs.h~neversend-skl-mpx-errata-simple 2016-05-04 09:46:22.323444051 -0700
+++ b/arch/x86/include/asm/bugs.h 2016-05-04 09:46:22.329444321 -0700
@@ -3,10 +3,16 @@
 
 extern void check_bugs(void);
 
+#if defined(CONFIG_CPU_SUP_INTEL)
+void check_mpx_erratum(struct cpuinfo_x86 *c);
+#else
+static inline void check_mpx_erratum(struct cpuinfo_x86 *c) {}
+#endif
+
 #if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32)
 int ppro_with_ram_bug(void);
 #else
 static inline int ppro_with_ram_bug(void) { return 0; }
 #endif
 
 #endif /* _ASM_X86_BUGS_H */
diff -puN arch/x86/kernel/cpu/common.c~neversend-skl-mpx-errata-simple arch/x86/kernel/cpu/common.c
--- a/arch/x86/kernel/cpu/common.c~neversend-skl-mpx-errata-simple 2016-05-04 09:46:22.324444096 -0700
+++ b/arch/x86/kernel/cpu/common.c 2016-05-04 09:46:22.330444366 -0700
@@ -37,6 +37,7 @@
 #include <asm/mtrr.h>
 #include <linux/numa.h>
 #include <asm/asm.h>
+#include <asm/bugs.h>
 #include <asm/cpu.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
@@ -270,6 +271,8 @@ static inline void squash_the_stupid_ser
 static __init int setup_disable_smep(char *arg)
 {
 	setup_clear_cpu_cap(X86_FEATURE_SMEP);
+	/* also check for things that depend on SMEP being enabled */
+	check_mpx_erratum(&boot_cpu_data);
 	return 1;
 }
 __setup("nosmep", setup_disable_smep);
diff -puN arch/x86/kernel/cpu/intel.c~neversend-skl-mpx-errata-simple arch/x86/kernel/cpu/intel.c
--- a/arch/x86/kernel/cpu/intel.c~neversend-skl-mpx-errata-simple 2016-05-04 09:46:22.326444186 -0700
+++ b/arch/x86/kernel/cpu/intel.c 2016-05-04 10:54:08.923959470 -0700
@@ -25,6 +25,41 @@
 #include <asm/apic.h>
 #endif
 
+/*
+ * Just in case our CPU detection goes bad, allow a way to
+ * override the disabling of MPX.
+ */
+static int forcempx;
+static int __init forcempx_setup(char *__unused)
+{
+	forcempx = 1;
+	return 1;
+}
+__setup("intel-skd-046-workaround=disable", forcempx_setup);
+
+void check_mpx_erratum(struct cpuinfo_x86 *c)
+{
+	if (forcempx)
+		return;
+	/*
+	 * Turn off the MPX feature on CPUs where SMEP is not
+	 * available or disabled.
+	 *
+	 * Works around Intel Erratum SKD046: "Branch Instructions
+	 * May Initialize MPX Bound Registers Incorrectly".
+	 *
+	 * This might falsely disable MPX on systems without
+	 * SMEP, like Atom processors without SMEP. But there
+	 * are no known cases where that happens on actual
+	 * hardware.
+	 */
+	if (cpu_has(c, X86_FEATURE_MPX) &&
+	    !cpu_has(c, X86_FEATURE_SMEP)) {
+		setup_clear_cpu_cap(X86_FEATURE_MPX);
+		pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
+	}
+}
+
 static void early_init_intel(struct cpuinfo_x86 *c)
 {
 	u64 misc_enable;
@@ -173,6 +208,8 @@ static void early_init_intel(struct cpui
 		if (edx & (1U << 28))
 			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
 	}
+
+	check_mpx_erratum(c);
 }
 
 #ifdef CONFIG_X86_32
_