Hi,
On 3/1/19 10:20 AM, Catalin Marinas wrote:
On Fri, Mar 01, 2019 at 10:12:09AM -0600, Jeremy Linton wrote:
On 3/1/19 1:11 AM, Andre Przywara wrote:
On 2/26/19 7:05 PM, Jeremy Linton wrote:
Display the mitigation status if active; otherwise assume the CPU is safe
unless it doesn't have CSV3 and isn't in our whitelist.
Signed-off-by: Jeremy Linton <jeremy.linton@xxxxxxx>
---
 arch/arm64/kernel/cpufeature.c | 47 ++++++++++++++++++++++++++--------
 1 file changed, 37 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f6d84e2c92fe..d31bd770acba 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -944,7 +944,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
 	return has_cpuid_feature(entry, scope);
 }
 
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static bool __meltdown_safe = true;
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 
 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
@@ -963,6 +963,16 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 		{ /* sentinel */ }
 	};
 	char const *str = "command line option";
+	bool meltdown_safe;
+
+	meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+
+	/* Defer to CPU feature registers */
+	if (has_cpuid_feature(entry, scope))
+		meltdown_safe = true;
+
+	if (!meltdown_safe)
+		__meltdown_safe = false;
 
 	/*
 	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
@@ -974,6 +984,11 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 		__kpti_forced = -1;
 	}
 
+	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
+		pr_info_once("kernel page table isolation disabled by CONFIG\n");
+		return false;
+	}
+
 	/* Forced? */
 	if (__kpti_forced) {
 		pr_info_once("kernel page table isolation forced %s by %s\n",
@@ -985,14 +1000,10 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
 		return kaslr_offset() > 0;
 
-	/* Don't force KPTI for CPUs that are not vulnerable */
-	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
-		return false;
-
-	/* Defer to CPU feature registers */
-	return !has_cpuid_feature(entry, scope);
+	return !meltdown_safe;
 }
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 static void
 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 {
@@ -1022,6 +1033,13 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 	return;
 }
 
+#else
+static void
+kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+{
+}
+#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
 static int __init parse_kpti(char *str)
 {
@@ -1035,7 +1053,6 @@ static int __init parse_kpti(char *str)
 	return 0;
 }
 early_param("kpti", parse_kpti);
-#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 #ifdef CONFIG_ARM64_HW_AFDBM
 static inline void __cpu_enable_hw_dbm(void)
@@ -1286,7 +1303,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.field_pos = ID_AA64PFR0_EL0_SHIFT,
 		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
 	},
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 	{
 		.desc = "Kernel page table isolation (KPTI)",
 		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
@@ -1302,7 +1318,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.matches = unmap_kernel_at_el0,
 		.cpu_enable = kpti_install_ng_mappings,
 	},
-#endif
 	{
 		/* FP/SIMD is not implemented */
 		.capability = ARM64_HAS_NO_FPSIMD,
@@ -2063,3 +2078,15 @@ static int __init enable_mrs_emulation(void)
 }
 core_initcall(enable_mrs_emulation);
+
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	if (arm64_kernel_unmapped_at_el0())
+		return sprintf(buf, "Mitigation: KPTI\n");
+
+	if (__meltdown_safe)
+		return sprintf(buf, "Not affected\n");
Should those two checks be swapped, so that it doesn't report a KPTI
mitigation when the CPU is safe but KPTI was only enabled because of KASLR?
Or is that a different knob?
Hmmm, I think having it this way reflects the fact that the machine is
mitigated, independent of whether it needed it. The forced-on case is
similar: the machine may not have needed the mitigation, but it was forced on.
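(For illustration only: the swapped ordering being discussed would presumably
look something like the sketch below, i.e. the two checks from the hunk above
in the other order; this is not what the patch does.

	if (__meltdown_safe)
		return sprintf(buf, "Not affected\n");

	if (arm64_kernel_unmapped_at_el0())
		return sprintf(buf, "Mitigation: KPTI\n");

With that order, a whitelisted CPU running with KPTI enabled purely because of
KASLR would report "Not affected" rather than "Mitigation: KPTI".)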
So is this patchset about showing vulnerabilities _and_ mitigations or
just one of them?
Well, I don't think there is a way to express a mitigated but not vulnerable state in the current ABI. This set is mostly just to bring us in line with the current ABI expectations.
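For reference, going only by the two checks visible in the quoted hunk, the
new sysfs entry would presumably read as follows (the fall-through case for a
vulnerable, unmitigated CPU isn't shown in the quote above):

	# KPTI enabled, whether needed, forced on, or selected because of KASLR
	$ cat /sys/devices/system/cpu/vulnerabilities/meltdown
	Mitigation: KPTI

	# KPTI not enabled and the CPU is whitelisted or advertises CSV3
	$ cat /sys/devices/system/cpu/vulnerabilities/meltdown
	Not affected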