[PATCH 4.19 094/114] arm64: add sysfs vulnerability show for meltdown

From: Greg Kroah-Hartman
Date: Thu Oct 10 2019 - 04:56:09 EST


From: Jeremy Linton <jeremy.linton@xxxxxxx>

[ Upstream commit 1b3ccf4be0e7be8c4bd8522066b6cbc92591e912 ]

We implement page table isolation as a mitigation for meltdown.
Report this to userspace via sysfs.

Signed-off-by: Jeremy Linton <jeremy.linton@xxxxxxx>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@xxxxxxx>
Reviewed-by: Andre Przywara <andre.przywara@xxxxxxx>
Reviewed-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Tested-by: Stefan Wahren <stefan.wahren@xxxxxxxx>
Signed-off-by: Will Deacon <will.deacon@xxxxxxx>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
arch/arm64/kernel/cpufeature.c | 58 +++++++++++++++++++++++++++++++----------
1 file changed, 44 insertions(+), 14 deletions(-)

--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -889,7 +889,7 @@ static bool has_cache_dic(const struct a
 	return ctr & BIT(CTR_DIC_SHIFT);
 }
 
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static bool __meltdown_safe = true;
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
 
 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
@@ -908,6 +908,16 @@ static bool unmap_kernel_at_el0(const st
 		{ /* sentinel */ }
 	};
 	char const *str = "command line option";
+	bool meltdown_safe;
+
+	meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+
+	/* Defer to CPU feature registers */
+	if (has_cpuid_feature(entry, scope))
+		meltdown_safe = true;
+
+	if (!meltdown_safe)
+		__meltdown_safe = false;
 
 	/*
 	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
@@ -919,6 +929,19 @@ static bool unmap_kernel_at_el0(const st
 		__kpti_forced = -1;
 	}
 
+	/* Useful for KASLR robustness */
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
+		if (!__kpti_forced) {
+			str = "KASLR";
+			__kpti_forced = 1;
+		}
+	}
+
+	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
+		pr_info_once("kernel page table isolation disabled by kernel configuration\n");
+		return false;
+	}
+
 	/* Forced? */
 	if (__kpti_forced) {
 		pr_info_once("kernel page table isolation forced %s by %s\n",
@@ -926,18 +949,10 @@ static bool unmap_kernel_at_el0(const st
 		return __kpti_forced > 0;
 	}
 
-	/* Useful for KASLR robustness */
-	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
-		return true;
-
-	/* Don't force KPTI for CPUs that are not vulnerable */
-	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
-		return false;
-
-	/* Defer to CPU feature registers */
-	return !has_cpuid_feature(entry, scope);
+	return !meltdown_safe;
 }
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 static void
 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 {
@@ -962,6 +977,12 @@ kpti_install_ng_mappings(const struct ar
 
 	return;
 }
+#else
+static void
+kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+{
+}
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 static int __init parse_kpti(char *str)
 {
@@ -975,7 +996,6 @@ static int __init parse_kpti(char *str)
 	return 0;
 }
 early_param("kpti", parse_kpti);
-#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 #ifdef CONFIG_ARM64_HW_AFDBM
 static inline void __cpu_enable_hw_dbm(void)
@@ -1196,7 +1216,6 @@ static const struct arm64_cpu_capabiliti
 		.field_pos = ID_AA64PFR0_EL0_SHIFT,
 		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
 	},
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 	{
 		.desc = "Kernel page table isolation (KPTI)",
 		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
@@ -1212,7 +1231,6 @@ static const struct arm64_cpu_capabiliti
 		.matches = unmap_kernel_at_el0,
 		.cpu_enable = kpti_install_ng_mappings,
 	},
-#endif
 	{
 		/* FP/SIMD is not implemented */
 		.capability = ARM64_HAS_NO_FPSIMD,
@@ -1853,3 +1871,15 @@ void cpu_clear_disr(const struct arm64_c
 	/* Firmware may have left a deferred SError in this register. */
 	write_sysreg_s(0, SYS_DISR_EL1);
 }
+
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	if (__meltdown_safe)
+		return sprintf(buf, "Not affected\n");
+
+	if (arm64_kernel_unmapped_at_el0())
+		return sprintf(buf, "Mitigation: PTI\n");
+
+	return sprintf(buf, "Vulnerable\n");
+}
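
[ Not part of the patch: a minimal userspace sketch of how the new entry
  can be read once this change is applied. The cpu_show_meltdown() handler
  added in the last hunk is wired up by the generic driver core to
  /sys/devices/system/cpu/vulnerabilities/meltdown, so that file holds one
  of the three strings produced above. ]

	/* Illustrative only: print the Meltdown status reported by the
	 * arm64 cpu_show_meltdown() handler through the generic
	 * /sys/devices/system/cpu/vulnerabilities interface.
	 */
	#include <stdio.h>

	int main(void)
	{
		char buf[64];
		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/meltdown", "r");

		if (!f) {
			perror("meltdown sysfs entry");
			return 1;
		}
		/* Expected: "Not affected", "Mitigation: PTI" or "Vulnerable" */
		if (fgets(buf, sizeof(buf), f))
			fputs(buf, stdout);
		fclose(f);
		return 0;
	}

[ Equivalently, "cat /sys/devices/system/cpu/vulnerabilities/meltdown"
  prints the same string. ]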