[patch 34/60] x86/mm/kpti: Populate user PGD
From: Thomas Gleixner
Date: Mon Dec 04 2017 - 11:58:18 EST
From: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Populate the PGD entries in the init user PGD which cover the kernel half
of the address space. This makes sure that the installation of the
user-visible kernel mappings finds a populated PGD.
In clone_pgd_range(), copy the init user PGD entries which cover the kernel
half of the address space, so that every new process has all the required
kernel mappings visible.
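
For illustration only (not part of this patch), here is a minimal user-space
sketch of the PGD-pair layout the copy above relies on, as set up by the
earlier patches in this series: the kernel and user PGDs are assumed to be
the lower and upper 4k halves of one 8k-aligned allocation, with the user
half reached by setting bit 12 of the kernel PGD pointer, which is what
kernel_to_user_pgdp() is taken to do. All model_* / MODEL_* names are made
up for the sketch.

/*
 * Stand-alone model of the two-page PGD layout assumed by
 * clone_pgd_range() above (not kernel code).
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SHIFT	12
#define MODEL_PAGE_SIZE		(1UL << MODEL_PAGE_SHIFT)

typedef struct { uint64_t pgd; } model_pgd_t;

/* Mirrors the kernel_to_user_pgdp() idea: step to the upper 4k page. */
static model_pgd_t *model_kernel_to_user_pgdp(model_pgd_t *pgdp)
{
	return (model_pgd_t *)((uintptr_t)pgdp | MODEL_PAGE_SIZE);
}

int main(void)
{
	/* 8k-aligned, 8k-sized block: kernel PGD low page, user PGD above it. */
	model_pgd_t *kernel_pgd = aligned_alloc(2 * MODEL_PAGE_SIZE,
						2 * MODEL_PAGE_SIZE);
	model_pgd_t *user_pgd = model_kernel_to_user_pgdp(kernel_pgd);

	printf("kernel PGD at %p, user PGD at %p (offset %#lx)\n",
	       (void *)kernel_pgd, (void *)user_pgd,
	       (unsigned long)((uintptr_t)user_pgd - (uintptr_t)kernel_pgd));
	free(kernel_pgd);
	return 0;
}

With that layout, copying count * sizeof(pgd_t) bytes at both pointers keeps
the kernel halves of the kernel and user PGDs in sync at fork time.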
[ tglx: Split out from the big kaiser dump ]
Signed-off-by: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
arch/x86/include/asm/pgtable.h | 5 +++++
arch/x86/mm/kpti.c | 41 +++++++++++++++++++++++++++++++++++++++++
2 files changed, 46 insertions(+)
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1116,6 +1116,11 @@ static inline void pmdp_set_wrprotect(st
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
memcpy(dst, src, count * sizeof(pgd_t));
+#ifdef CONFIG_KERNEL_PAGE_TABLE_ISOLATION
+ /* Clone the user space pgd as well */
+ memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
+ count * sizeof(pgd_t));
+#endif
}
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
--- a/arch/x86/mm/kpti.c
+++ b/arch/x86/mm/kpti.c
@@ -65,6 +65,45 @@ void __init kpti_check_boottime_disable(
}
/*
+ * Ensure that the top level of the user page tables is entirely
+ * populated. This ensures that all processes that get forked have the
+ * same entries. This way, we never have to set up new entries in
+ * older processes.
+ *
+ * Note: we never free these, so there are no updates to them after this.
+ */
+static void __init kpti_init_all_pgds(void)
+{
+ pgd_t *pgd;
+ int i;
+
+ pgd = kernel_to_user_pgdp(pgd_offset_k(0UL));
+ for (i = PTRS_PER_PGD / 2; i < PTRS_PER_PGD; i++) {
+ /*
+ * Each PGD entry moves up PGDIR_SIZE bytes through the
+ * address space, so get the first virtual address mapped
+ * by PGD #i:
+ */
+ unsigned long addr = i * PGDIR_SIZE;
+#if CONFIG_PGTABLE_LEVELS > 4
+ p4d_t *p4d = p4d_alloc_one(&init_mm, addr);
+ if (!p4d) {
+ WARN_ON(1);
+ break;
+ }
+ set_pgd(pgd + i, __pgd(_KERNPG_TABLE | __pa(p4d)));
+#else /* CONFIG_PGTABLE_LEVELS <= 4 */
+ pud_t *pud = pud_alloc_one(&init_mm, addr);
+ if (!pud) {
+ WARN_ON(1);
+ break;
+ }
+ set_pgd(pgd + i, __pgd(_KERNPG_TABLE | __pa(pud)));
+#endif /* CONFIG_PGTABLE_LEVELS */
+ }
+}
+
+/*
* Initialize kernel page table isolation
*/
void __init kpti_init(void)
@@ -73,4 +112,6 @@ void __init kpti_init(void)
return;
pr_info("enabled\n");
+
+ kpti_init_all_pgds();
}
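
As a stand-alone sanity check on the loop bounds in kpti_init_all_pgds()
(not kernel code; it hard-codes the 4-level paging constants PGDIR_SHIFT = 39
and PTRS_PER_PGD = 512, and all MODEL_* names are made up), the following
prints the first canonical kernel virtual address covered by a couple of the
kernel-half PGD slots:

/*
 * Stand-alone model of the kernel-half PGD slot arithmetic used above:
 * slots 256..511 cover the kernel half of the address space and each
 * slot advances by PGDIR_SIZE.
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_PGDIR_SHIFT	39
#define MODEL_PTRS_PER_PGD	512
#define MODEL_PGDIR_SIZE	((uint64_t)1 << MODEL_PGDIR_SHIFT)

/* Sign-extend bit 47 to get the canonical form of a 48-bit address. */
static uint64_t canonical(uint64_t addr)
{
	return addr & ((uint64_t)1 << 47) ? addr | 0xffff000000000000ULL : addr;
}

int main(void)
{
	for (int i = MODEL_PTRS_PER_PGD / 2; i < MODEL_PTRS_PER_PGD; i += 128) {
		uint64_t addr = (uint64_t)i * MODEL_PGDIR_SIZE;

		printf("PGD slot %3d starts at %#018lx\n", i,
		       (unsigned long)canonical(addr));
	}
	return 0;
}

Slot 256 comes out at 0xffff800000000000, the start of the kernel half with
4-level paging, and each further slot advances by PGDIR_SIZE (512 GiB).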