[PATCH 28/43] x86/mm/kaiser: Map CPU entry area

From: Ingo Molnar
Date: Fri Nov 24 2017 - 04:22:52 EST


From: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>

There is now a special 'struct cpu_entry_area' that contains all
of the data needed to enter the kernel. It's mapped in the fixmap
area and contains:

* The GDT (hardware segment descriptor)
* The TSS (thread information structure that points the hardware
at the various stacks, and contains the entry stack).
* The entry trampoline code itself
* The exception stacks (aka IRQ stacks)

Signed-off-by: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Brian Gerst <brgerst@xxxxxxxxx>
Cc: Daniel Gruss <daniel.gruss@xxxxxxxxxxxxxx>
Cc: Denys Vlasenko <dvlasenk@xxxxxxxxxx>
Cc: H. Peter Anvin <hpa@xxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Cc: Kees Cook <keescook@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Michael Schwarz <michael.schwarz@xxxxxxxxxxxxxx>
Cc: Moritz Lipp <moritz.lipp@xxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Richard Fellner <richard.fellner@xxxxxxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: linux-mm@xxxxxxxxx
Link: http://lkml.kernel.org/r/20171123003453.D4CB33A9@xxxxxxxxxxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
arch/x86/include/asm/kaiser.h | 6 ++++++
arch/x86/kernel/cpu/common.c | 4 ++++
arch/x86/mm/kaiser.c | 31 +++++++++++++++++++++++++++++++
include/linux/kaiser.h | 3 +++
4 files changed, 44 insertions(+)

diff --git a/arch/x86/include/asm/kaiser.h b/arch/x86/include/asm/kaiser.h
index 3c2cc71b4058..040cb096d29d 100644
--- a/arch/x86/include/asm/kaiser.h
+++ b/arch/x86/include/asm/kaiser.h
@@ -33,6 +33,12 @@
extern int kaiser_add_mapping(unsigned long addr, unsigned long size,
unsigned long flags);

+/**
+ * kaiser_add_mapping_cpu_entry - map the cpu entry area
+ * @cpu: the CPU for which the entry area is being mapped
+ */
+extern void kaiser_add_mapping_cpu_entry(int cpu);
+
/**
* kaiser_remove_mapping - remove a kernel mapping from the userpage tables
* @addr: the start address of the range
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 3b6920c9fef7..d6bcf397b00d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
+#include <linux/kaiser.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
@@ -584,6 +585,9 @@ static inline void setup_cpu_entry_area(int cpu)
__set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline),
__pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
#endif
+ /* CPU 0's mapping is done in kaiser_init() */
+ if (cpu)
+ kaiser_add_mapping_cpu_entry(cpu);
}

/* Load the original GDT from the per-cpu structure */
diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index 7f7561e9971d..4665dd724efb 100644
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -353,6 +353,26 @@ static void __init kaiser_init_all_pgds(void)
WARN_ON(__ret); \
} while (0)

+void kaiser_add_mapping_cpu_entry(int cpu)
+{
+ kaiser_add_user_map_early(get_cpu_gdt_ro(cpu), PAGE_SIZE,
+ __PAGE_KERNEL_RO);
+
+ /* includes the entry stack */
+ kaiser_add_user_map_early(&get_cpu_entry_area(cpu)->tss,
+ sizeof(get_cpu_entry_area(cpu)->tss),
+ __PAGE_KERNEL | _PAGE_GLOBAL);
+
+ /* Entry code, so needs to be EXEC */
+ kaiser_add_user_map_early(&get_cpu_entry_area(cpu)->entry_trampoline,
+ sizeof(get_cpu_entry_area(cpu)->entry_trampoline),
+ __PAGE_KERNEL_EXEC | _PAGE_GLOBAL);
+
+ kaiser_add_user_map_early(&get_cpu_entry_area(cpu)->exception_stacks,
+ sizeof(get_cpu_entry_area(cpu)->exception_stacks),
+ __PAGE_KERNEL | _PAGE_GLOBAL);
+}
+
extern char __per_cpu_user_mapped_start[], __per_cpu_user_mapped_end[];
/*
* If anything in here fails, we will likely die on one of the
@@ -390,6 +410,17 @@ void __init kaiser_init(void)
kaiser_add_user_map_early((void *)idt_descr.address,
sizeof(gate_desc) * NR_VECTORS,
__PAGE_KERNEL_RO | _PAGE_GLOBAL);
+
+ /*
+ * We delay CPU 0's mappings because these structures are
+ * created before the page allocator is up. Deferring it
+ * until here lets us use the plain page allocator
+ * unconditionally in the page table code above.
+ *
+ * This is OK because kaiser_init() is called long before
+ * we ever run userspace and need the KAISER mappings.
+ */
+ kaiser_add_mapping_cpu_entry(0);
}

int kaiser_add_mapping(unsigned long addr, unsigned long size,
diff --git a/include/linux/kaiser.h b/include/linux/kaiser.h
index 0fd800efa95c..77db4230a0dd 100644
--- a/include/linux/kaiser.h
+++ b/include/linux/kaiser.h
@@ -25,5 +25,8 @@ static inline int kaiser_add_mapping(unsigned long addr, unsigned long size,
return 0;
}

+static inline void kaiser_add_mapping_cpu_entry(int cpu)
+{
+}
#endif /* !CONFIG_KAISER */
#endif /* _INCLUDE_KAISER_H */
--
2.14.1