[PATCH 01/30] x86, mm: do not set _PAGE_USER for init_mm page tables
From: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Date: Fri Nov 10 2017 - 14:31:27 EST
init_mm is for kernel-exclusive use. If someone is allocating page
tables for it, do not set _PAGE_USER on them.
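For reference, the two page-table flag sets involved differ only by
_PAGE_USER. A rough sketch of the definitions in pgtable_types.h
(abbreviated; consult the actual header, the exact flag lists may
differ by kernel version):

	#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | \
				 _PAGE_ACCESSED | _PAGE_DIRTY)
	#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)

So choosing _KERNPG_TABLE for init_mm's page tables is exactly "do
not set _PAGE_USER".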
Signed-off-by: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Reviewed-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Moritz Lipp <moritz.lipp@xxxxxxxxxxxxxx>
Cc: Daniel Gruss <daniel.gruss@xxxxxxxxxxxxxx>
Cc: Michael Schwarz <michael.schwarz@xxxxxxxxxxxxxx>
Cc: Richard Fellner <richard.fellner@xxxxxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Kees Cook <keescook@xxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: x86@xxxxxxxxxx
---
b/arch/x86/include/asm/pgalloc.h | 37 ++++++++++++++++++++++++++++++++-----
1 file changed, 32 insertions(+), 5 deletions(-)
diff -puN arch/x86/include/asm/pgalloc.h~kaiser-prep-clear-_PAGE_USER-for-init_mm arch/x86/include/asm/pgalloc.h
--- a/arch/x86/include/asm/pgalloc.h~kaiser-prep-clear-_PAGE_USER-for-init_mm 2017-11-10 11:22:04.991244960 -0800
+++ b/arch/x86/include/asm/pgalloc.h 2017-11-10 11:22:04.994244960 -0800
@@ -61,20 +61,41 @@ static inline void __pte_free_tlb(struct
___pte_free_tlb(tlb, pte);
}
+/*
+ * init_mm is for kernel-exclusive use. Any page tables that
+ * are set up for it should not be usable by userspace.
+ *
+ * This also *signals* to code (like KAISER) that this page table
+ * entry is for kernel-exclusive use.
+ */
+static inline pteval_t mm_pgtable_flags(struct mm_struct *mm)
+{
+ if (!mm || (mm == &init_mm))
+ return _KERNPG_TABLE;
+ return _PAGE_TABLE;
+}
+
static inline void pmd_populate_kernel(struct mm_struct *mm,
pmd_t *pmd, pte_t *pte)
{
+ /*
+ * Since we are populating a kernel pmd, always use
+ * _KERNPG_TABLE and ignore the mm argument.
+ */
+ pteval_t pgtable_flags = _KERNPG_TABLE;
+
paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
- set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+ set_pmd(pmd, __pmd(__pa(pte) | pgtable_flags));
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
struct page *pte)
{
+ pteval_t pgtable_flags = mm_pgtable_flags(mm);
unsigned long pfn = page_to_pfn(pte);
paravirt_alloc_pte(mm, pfn);
- set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
+ set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | pgtable_flags));
}
#define pmd_pgtable(pmd) pmd_page(pmd)
@@ -117,16 +138,20 @@ extern void pud_populate(struct mm_struc
#else /* !CONFIG_X86_PAE */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
+ pteval_t pgtable_flags = mm_pgtable_flags(mm);
+
paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
- set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
+ set_pud(pud, __pud(__pa(pmd) | pgtable_flags));
}
#endif /* CONFIG_X86_PAE */
#if CONFIG_PGTABLE_LEVELS > 3
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
+ pteval_t pgtable_flags = mm_pgtable_flags(mm);
+
paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
- set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
+ set_p4d(p4d, __p4d(__pa(pud) | pgtable_flags));
}
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -155,8 +180,10 @@ static inline void __pud_free_tlb(struct
#if CONFIG_PGTABLE_LEVELS > 4
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
+ pteval_t pgtable_flags = mm_pgtable_flags(mm);
+
paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
- set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
+ set_pgd(pgd, __pgd(__pa(p4d) | pgtable_flags));
}
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
_
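A quick illustration of the new helper's intended behavior (not part
of the patch; 'some_user_mm' stands in for a hypothetical user
process mm):

	mm_pgtable_flags(&init_mm);     /* _KERNPG_TABLE: no _PAGE_USER    */
	mm_pgtable_flags(NULL);         /* no mm at all: assume kernel use */
	mm_pgtable_flags(some_user_mm); /* _PAGE_TABLE: _PAGE_USER is set  */

pmd_populate_kernel() bypasses the helper and hard-codes
_KERNPG_TABLE because it only ever populates pmds for kernel
mappings, regardless of which mm is passed in.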