[v7 03/10] arm64: add sysfs vulnerability show for meltdown

From: Jeremy Linton
Date: Wed Apr 10 2019 - 19:13:44 EST


Display the system Meltdown vulnerability status in sysfs. Note
that it is possible to have the mitigation (KPTI) enabled on a
machine that isn't actually vulnerable, e.g. when it is forced on
for KASLR hardening; in that case the sysfs entry reports "Not
affected" rather than the mitigation. This is because the core
ABI doesn't express the concept of a mitigation when the system
isn't vulnerable.

Signed-off-by: Jeremy Linton <jeremy.linton@xxxxxxx>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@xxxxxxx>
Reviewed-by: Andre Przywara <andre.przywara@xxxxxxx>
Reviewed-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Tested-by: Stefan Wahren <stefan.wahren@xxxxxxxx>
---
arch/arm64/kernel/cpufeature.c | 58 ++++++++++++++++++++++++++--------
1 file changed, 44 insertions(+), 14 deletions(-)
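
Note for reviewers (not part of the commit message): cpu_show_meltdown()
is picked up by the generic cpu vulnerabilities sysfs interface, which
declares a weak default and wires it into the "vulnerabilities"
attribute group; the strong, arch-specific definition added at the end
of this patch simply overrides that default. For context only, the
generic side in drivers/base/cpu.c looks roughly like this (paraphrased
from mainline, details from memory):

	/* drivers/base/cpu.c -- generic fallback, paraphrased for context */
	ssize_t __weak cpu_show_meltdown(struct device *dev,
					 struct device_attribute *attr, char *buf)
	{
		/* used by arches that don't provide their own show function */
		return sprintf(buf, "Not affected\n");
	}

	static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);

	static struct attribute *cpu_root_vulnerabilities_attrs[] = {
		&dev_attr_meltdown.attr,
		/* ... spectre_v1, spectre_v2, ... */
		NULL
	};

	static const struct attribute_group cpu_root_vulnerabilities_group = {
		.name  = "vulnerabilities",
		.attrs = cpu_root_vulnerabilities_attrs,
	};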

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 4061de10cea6..6b7e1556460a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -947,7 +947,7 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
return has_cpuid_feature(entry, scope);
}

-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static bool __meltdown_safe = true;
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
@@ -967,6 +967,16 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
{ /* sentinel */ }
};
char const *str = "command line option";
+ bool meltdown_safe;
+
+ meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
+
+ /* Defer to CPU feature registers */
+ if (has_cpuid_feature(entry, scope))
+ meltdown_safe = true;
+
+ if (!meltdown_safe)
+ __meltdown_safe = false;

/*
* For reasons that aren't entirely clear, enabling KPTI on Cavium
@@ -978,6 +988,19 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
__kpti_forced = -1;
}

+ /* Useful for KASLR robustness */
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
+ if (!__kpti_forced) {
+ str = "KASLR";
+ __kpti_forced = 1;
+ }
+ }
+
+ if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
+ pr_info_once("kernel page table isolation disabled by CONFIG\n");
+ return false;
+ }
+
/* Forced? */
if (__kpti_forced) {
pr_info_once("kernel page table isolation forced %s by %s\n",
@@ -985,18 +1008,10 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
return __kpti_forced > 0;
}

- /* Useful for KASLR robustness */
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- return kaslr_offset() > 0;
-
- /* Don't force KPTI for CPUs that are not vulnerable */
- if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
- return false;
-
- /* Defer to CPU feature registers */
- return !has_cpuid_feature(entry, scope);
+ return !meltdown_safe;
}

+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static void
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
{
@@ -1026,6 +1041,12 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)

return;
}
+#else
+static void
+kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
+{
+}
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

static int __init parse_kpti(char *str)
{
@@ -1039,7 +1060,6 @@ static int __init parse_kpti(char *str)
return 0;
}
early_param("kpti", parse_kpti);
-#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

#ifdef CONFIG_ARM64_HW_AFDBM
static inline void __cpu_enable_hw_dbm(void)
@@ -1306,7 +1326,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.field_pos = ID_AA64PFR0_EL0_SHIFT,
.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
},
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
{
.desc = "Kernel page table isolation (KPTI)",
.capability = ARM64_UNMAP_KERNEL_AT_EL0,
@@ -1322,7 +1341,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = unmap_kernel_at_el0,
.cpu_enable = kpti_install_ng_mappings,
},
-#endif
{
/* FP/SIMD is not implemented */
.capability = ARM64_HAS_NO_FPSIMD,
@@ -2101,3 +2119,15 @@ static int __init enable_mrs_emulation(void)
}

core_initcall(enable_mrs_emulation);
+
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ if (__meltdown_safe)
+ return sprintf(buf, "Not affected\n");
+
+ if (arm64_kernel_unmapped_at_el0())
+ return sprintf(buf, "Mitigation: KPTI\n");
+
+ return sprintf(buf, "Vulnerable\n");
+}
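
With this applied, reading /sys/devices/system/cpu/vulnerabilities/meltdown
reports one of three states: "Not affected" when the CPU is in
kpti_safe_list or advertises CSV3 in ID_AA64PFR0_EL1, "Mitigation: KPTI"
when the CPU is considered vulnerable and KPTI is enabled, and
"Vulnerable" when it is considered vulnerable and KPTI is off (for
example forced off on the command line).
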
--
2.20.1