[PATCH RFC 01/19] x86/mm: split out preallocate_sub_pgd()
From: Brendan Jackman
Date: Wed Feb 25 2026 - 11:34:44 EST
This code will be needed elsewhere in a following patch. Split it out
now, as a trivial code move, so that it is easy to review separately.
This changes the logging slightly: instead of panic() directly reporting
the level of the failure, there is now a generic panic message, preceded
by a separate ratelimited warning that reports the failing level. This
is a simple way to have this helper suit the needs of its new user as
well as the existing one.
Other than logging, no functional change intended.
Signed-off-by: Brendan Jackman <jackmanb@xxxxxxxxxx>
---
arch/x86/include/asm/pgalloc.h | 33 +++++++++++++++++++++++++++++++
arch/x86/mm/init_64.c | 44 +++++++-----------------------------------
2 files changed, 40 insertions(+), 37 deletions(-)
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index c88691b15f3c6..3541b86c9c6b0 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -2,6 +2,7 @@
#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H
+#include <linux/printk.h>
#include <linux/threads.h>
#include <linux/mm.h> /* for struct page */
#include <linux/pagemap.h>
@@ -128,6 +129,38 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
___pud_free_tlb(tlb, pud);
}
+/* Allocate a pagetable pointed to by the top hardware level. */
+static inline int preallocate_sub_pgd(struct mm_struct *mm, unsigned long addr)
+{
+ const char *lvl;
+ p4d_t *p4d;
+ pud_t *pud;
+
+ lvl = "p4d";
+ p4d = p4d_alloc(mm, pgd_offset_pgd(mm->pgd, addr), addr);
+ if (!p4d)
+ goto failed;
+
+ if (pgtable_l5_enabled())
+ return 0;
+
+ /*
+ * On 4-level systems, the P4D layer is folded away and
+ * the above code does no preallocation. Below, go down
+ * to the pud _software_ level to ensure the second
+ * hardware level is allocated on 4-level systems too.
+ */
+ lvl = "pud";
+ pud = pud_alloc(mm, p4d, addr);
+ if (!pud)
+ goto failed;
+ return 0;
+
+failed:
+ pr_warn_ratelimited("Failed to preallocate %s\n", lvl);
+ return -ENOMEM;
+}
+
#if CONFIG_PGTABLE_LEVELS > 4
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index df2261fa4f985..79806386dc42f 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1318,46 +1318,16 @@ static void __init register_page_bootmem_info(void)
static void __init preallocate_vmalloc_pages(void)
{
unsigned long addr;
- const char *lvl;
for (addr = VMALLOC_START; addr <= VMEMORY_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
- pgd_t *pgd = pgd_offset_k(addr);
- p4d_t *p4d;
- pud_t *pud;
-
- lvl = "p4d";
- p4d = p4d_alloc(&init_mm, pgd, addr);
- if (!p4d)
- goto failed;
-
- if (pgtable_l5_enabled())
- continue;
-
- /*
- * The goal here is to allocate all possibly required
- * hardware page tables pointed to by the top hardware
- * level.
- *
- * On 4-level systems, the P4D layer is folded away and
- * the above code does no preallocation. Below, go down
- * to the pud _software_ level to ensure the second
- * hardware level is allocated on 4-level systems too.
- */
- lvl = "pud";
- pud = pud_alloc(&init_mm, p4d, addr);
- if (!pud)
- goto failed;
+ if (preallocate_sub_pgd(&init_mm, addr)) {
+ /*
+ * The pages have to be there now or they will be
+ * missing in process page-tables later.
+ */
+ panic("Failed to pre-allocate pagetables for vmalloc area\n");
+ }
}
-
- return;
-
-failed:
-
- /*
- * The pages have to be there now or they will be missing in
- * process page-tables later.
- */
- panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl);
}
void __init arch_mm_preinit(void)
--
2.51.2