[RFC PATCH 12/16] arm64: mm: Map p4d/pgd with privileged pkey

From: Kevin Brodsky
Date: Fri Dec 06 2024 - 05:17:27 EST


If CONFIG_KPKEYS_HARDENED_PGTABLES is enabled, map p4d/pgd pages
using a privileged pkey (KPKEYS_PKEY_PGTABLES), so that they can
only be written under guard(kpkeys_hardened_pgtables).

The case where the pgd is not page-sized is not currently handled -
this is pending support for pkeys in kmem_cache.

This patch is a no-op if CONFIG_KPKEYS_HARDENED_PGTABLES is disabled
(default).

Signed-off-by: Kevin Brodsky <kevin.brodsky@xxxxxxx>
---
arch/arm64/include/asm/pgalloc.h | 21 ++++++++++++++++++---
arch/arm64/mm/pgd.c | 30 ++++++++++++++++++++++++++++--
2 files changed, 46 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index e75422864d1b..c006aecd6ba5 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -88,18 +88,33 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, p4d_t *p4dp)
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
{
gfp_t gfp = GFP_PGTABLE_USER;
+ int ret;

if (mm == &init_mm)
gfp = GFP_PGTABLE_KERNEL;
- return (p4d_t *)get_zeroed_page(gfp);
+
+ addr = get_zeroed_page(gfp);
+ if (!addr)
+ return NULL;
+
+ ret = kpkeys_protect_pgtable_memory(addr, 1);
+ if (ret) {
+ free_page(addr);
+ return NULL;
+ }
+
+ return (p4d_t *)addr;
}

static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
+ unsigned long addr = (unsigned long)p4d;
+
if (!pgtable_l5_enabled())
return;
- BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
- free_page((unsigned long)p4d);
+ BUG_ON(addr & (PAGE_SIZE-1));
+ kpkeys_unprotect_pgtable_memory(addr, 1);
+ free_page(addr);
}

#define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d)
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 0c501cabc238..3577cc1821af 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -28,12 +28,38 @@ static bool pgdir_is_page_size(void)
return false;
}

+static pgd_t *pgd_page_alloc(gfp_t gfp)
+{
+ unsigned long addr;
+ int ret;
+
+ addr = __get_free_page(gfp);
+ if (!addr)
+ return NULL;
+
+ ret = kpkeys_protect_pgtable_memory(addr, 1);
+ if (ret) {
+ free_page(addr);
+ return NULL;
+ }
+
+ return (pgd_t *)addr;
+}
+
+static void pgd_page_free(pgd_t *pgd)
+{
+ unsigned long addr = (unsigned long)pgd;
+
+ kpkeys_unprotect_pgtable_memory(addr, 1);
+ free_page(addr);
+}
+
pgd_t *pgd_alloc(struct mm_struct *mm)
{
gfp_t gfp = GFP_PGTABLE_USER;

if (pgdir_is_page_size())
- return (pgd_t *)__get_free_page(gfp);
+ return pgd_page_alloc(gfp);
else
return kmem_cache_alloc(pgd_cache, gfp);
}
@@ -41,7 +67,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
if (pgdir_is_page_size())
- free_page((unsigned long)pgd);
+ pgd_page_free(pgd);
else
kmem_cache_free(pgd_cache, pgd);
}
--
2.47.0