[PATCH 4.15 031/202] [Variant 3/Meltdown] arm64: entry: Add fake CPU feature for unmapping the kernel at EL0

From: Greg Kroah-Hartman
Date: Thu Feb 15 2018 - 10:40:03 EST


4.15-stable review patch. If anyone has any objections, please let me know.

------------------

From: Will Deacon <will.deacon@xxxxxxx>


Commit ea1e3de85e94 upstream.

Allow explicit disabling of the entry trampoline on the kernel command
line (kpti=off) by adding a fake CPU feature (ARM64_UNMAP_KERNEL_AT_EL0)
that can be used to toggle the alternative sequences in our entry code and
avoid use of the trampoline altogether if desired. This also allows us to
make use of a static key in arm64_kernel_unmapped_at_el0().
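For readers following the cpufeature.c hunk below: the new unmap_kernel_at_el0() matcher gives a kpti= command-line override the final say, and otherwise defaults the capability to on when CONFIG_RANDOMIZE_BASE (KASLR) is configured. A minimal stand-alone C sketch of that decision follows; the names here (kpti_forced, want_unmap_kernel_at_el0, the ad-hoc string parsing) are illustrative only and not the kernel's own code.

/*
 * Stand-alone sketch of the kpti= decision logic added to
 * arch/arm64/kernel/cpufeature.c by this patch. Illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static int kpti_forced;	/* 0: not forced, >0: forced on, <0: forced off */

/* Rough user-space stand-in for strtobool() + the __setup("kpti=", ...) hook. */
static int parse_kpti(const char *str)
{
	if (!strcmp(str, "1") || !strcmp(str, "on") || !strcmp(str, "y"))
		kpti_forced = 1;
	else if (!strcmp(str, "0") || !strcmp(str, "off") || !strcmp(str, "n"))
		kpti_forced = -1;
	else
		return -1;		/* unrecognised value */
	return 0;
}

/* Mirrors unmap_kernel_at_el0(): a command-line override wins, otherwise
 * default to enabled when KASLR (CONFIG_RANDOMIZE_BASE) is built in. */
static bool want_unmap_kernel_at_el0(bool kaslr_configured)
{
	if (kpti_forced)
		return kpti_forced > 0;
	return kaslr_configured;
}

int main(void)
{
	parse_kpti("off");	/* as if kpti=off were on the kernel command line */
	printf("kpti enabled: %d\n", want_unmap_kernel_at_el0(true));	/* prints 0 */
	return 0;
}

Booting with kpti=off therefore avoids the trampoline entirely even on a KASLR kernel, since the forced value is consulted before the CONFIG_RANDOMIZE_BASE default.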

Reviewed-by: Mark Rutland <mark.rutland@xxxxxxx>
Tested-by: Laura Abbott <labbott@xxxxxxxxxx>
Tested-by: Shanker Donthineni <shankerd@xxxxxxxxxxxxxx>
Signed-off-by: Will Deacon <will.deacon@xxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
arch/arm64/include/asm/cpucaps.h | 3 +-
arch/arm64/include/asm/mmu.h | 3 +-
arch/arm64/kernel/cpufeature.c | 41 +++++++++++++++++++++++++++++++++++++++
arch/arm64/kernel/entry.S | 9 ++++----
4 files changed, 50 insertions(+), 6 deletions(-)

--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -41,7 +41,8 @@
#define ARM64_WORKAROUND_CAVIUM_30115 20
#define ARM64_HAS_DCPOP 21
#define ARM64_SVE 22
+#define ARM64_UNMAP_KERNEL_AT_EL0 23

-#define ARM64_NCAPS 23
+#define ARM64_NCAPS 24

#endif /* __ASM_CPUCAPS_H */
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -36,7 +36,8 @@ typedef struct {

static inline bool arm64_kernel_unmapped_at_el0(void)
{
- return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0);
+ return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
+ cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}

extern void paging_init(void);
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -846,6 +846,40 @@ static bool has_no_fpsimd(const struct a
ID_AA64PFR0_FP_SHIFT) < 0;
}

+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
+ int __unused)
+{
+ /* Forced on command line? */
+ if (__kpti_forced) {
+ pr_info_once("kernel page table isolation forced %s by command line option\n",
+ __kpti_forced > 0 ? "ON" : "OFF");
+ return __kpti_forced > 0;
+ }
+
+ /* Useful for KASLR robustness */
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ return true;
+
+ return false;
+}
+
+static int __init parse_kpti(char *str)
+{
+ bool enabled;
+ int ret = strtobool(str, &enabled);
+
+ if (ret)
+ return ret;
+
+ __kpti_forced = enabled ? 1 : -1;
+ return 0;
+}
+__setup("kpti=", parse_kpti);
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -932,6 +966,13 @@ static const struct arm64_cpu_capabiliti
.def_scope = SCOPE_SYSTEM,
.matches = hyp_offset_low,
},
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ {
+ .capability = ARM64_UNMAP_KERNEL_AT_EL0,
+ .def_scope = SCOPE_SYSTEM,
+ .matches = unmap_kernel_at_el0,
+ },
+#endif
{
/* FP/SIMD is not implemented */
.capability = ARM64_HAS_NO_FPSIMD,
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -74,6 +74,7 @@
.macro kernel_ventry, el, label, regsize = 64
.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+alternative_if ARM64_UNMAP_KERNEL_AT_EL0
.if \el == 0
.if \regsize == 64
mrs x30, tpidrro_el0
@@ -82,6 +83,7 @@
mov x30, xzr
.endif
.endif
+alternative_else_nop_endif
#endif

sub sp, sp, #S_FRAME_SIZE
@@ -323,10 +325,9 @@ alternative_else_nop_endif
ldr lr, [sp, #S_LR]
add sp, sp, #S_FRAME_SIZE // restore sp

-#ifndef CONFIG_UNMAP_KERNEL_AT_EL0
- eret
-#else
.if \el == 0
+alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
bne 4f
msr far_el1, x30
tramp_alias x30, tramp_exit_native
@@ -334,10 +335,10 @@ alternative_else_nop_endif
4:
tramp_alias x30, tramp_exit_compat
br x30
+#endif
.else
eret
.endif
-#endif
.endm

.macro irq_stack_entry