[PATCH RFC 04/11] x86/mm/asi: Sync physmap into ASI_GLOBAL_NONSENSITIVE

From: Brendan Jackman
Date: Thu Mar 13 2025 - 14:12:41 EST


Mirror the physmap into the ASI pagetables, but cap the mapping
granularity at 2M so that pageblock sensitivity can later be changed
without having to allocate pagetables.
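
To make the guarantee concrete: a pageblock spans
2^(PAGE_SHIFT + pageblock_order) bytes, which on x86-64 with 4K pages
and the usual pageblock_order of 9 is exactly one 2M PMD entry, so
every pageblock boundary is also a PMD boundary. A rough userspace
sketch of that arithmetic (the constants are assumed typical values;
this is illustrative only, not kernel code):

  #include <assert.h>
  #include <stdio.h>

  #define PAGE_SHIFT       12  /* 4K base pages */
  #define PAGEBLOCK_ORDER   9  /* assumed: typical x86-64 pageblock_order */
  #define PMD_SHIFT        21  /* a 2M mapping covers 2^21 bytes */

  int main(void)
  {
          unsigned long pageblock = 1UL << (PAGE_SHIFT + PAGEBLOCK_ORDER);

          /* The same condition the patch asserts with VM_BUG_ON(): a
           * pageblock must be at least as big as the largest mapping,
           * so changing one never has to split a huge mapping. */
          assert(PAGE_SHIFT + PAGEBLOCK_ORDER >= PMD_SHIFT);

          printf("pageblock = %lu KiB, %lu PMD entries\n",
                 pageblock >> 10, pageblock >> PMD_SHIFT);
          return 0;
  }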

Signed-off-by: Brendan Jackman <jackmanb@xxxxxxxxxx>
---
arch/x86/mm/init_64.c | 30 ++++++++++++++++++++++++++++--
1 file changed, 28 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 8f75274fddd96b8285aff48493ebad93e30daebe..4ca6bb419b9643b0e72cb5b6da6d905f2b2be84b 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -7,6 +7,7 @@
* Copyright (C) 2002,2003 Andi Kleen <ak@xxxxxxx>
*/

+#include <linux/asi.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -736,7 +737,8 @@ phys_pgd_init(pgd_t *pgd_page, unsigned long paddr_start, unsigned long paddr_en
{
unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;

- *pgd_changed = false;
+ if (pgd_changed)
+ *pgd_changed = false;

paddr_last = paddr_end;
vaddr = (unsigned long)__va(paddr_start);
@@ -770,9 +772,13 @@ phys_pgd_init(pgd_t *pgd_page, unsigned long paddr_start, unsigned long paddr_en
(pud_t *) p4d, init);

spin_unlock(&init_mm.page_table_lock);
- *pgd_changed = true;
+ if (pgd_changed)
+ *pgd_changed = true;
}

+ if (pgd_changed && *pgd_changed)
+ sync_global_pgds(vaddr_start, vaddr_end - 1);
+
return paddr_last;
}

@@ -784,9 +790,29 @@ __kernel_physical_mapping_init(unsigned long paddr_start,
{
bool pgd_changed;
unsigned long paddr_last;
+ pgd_t *pgd_asi = asi_pgd(ASI_GLOBAL_NONSENSITIVE);

paddr_last = phys_pgd_init(init_mm.pgd, paddr_start, paddr_end, page_size_mask,
prot, init, &pgd_changed);
+
+ /*
+ * Set up ASI's global-nonsensitive physmap. This needs to be mapped
+ * at 2M max size so that regions can be mapped and unmapped at pageblock
+ * granularity without requiring allocations.
+ */
+ if (pgd_asi) {
+ /*
+ * Since most memory is expected to end up sensitive, start with
+ * everything unmapped in this pagetable. The page allocator
+ * assumes that's the case.
+ */
+ pgprot_t prot_np = __pgprot(pgprot_val(prot) & ~_PAGE_PRESENT);
+
+ VM_BUG_ON((PAGE_SHIFT + pageblock_order) < page_level_shift(PG_LEVEL_2M));
+ phys_pgd_init(pgd_asi, paddr_start, paddr_end, 1 << PG_LEVEL_2M,
+ prot_np, init, NULL);
+ }
+
if (pgd_changed)
sync_global_pgds((unsigned long)__va(paddr_start),
(unsigned long)__va(paddr_end) - 1);

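A note on the start-unmapped comment above: because phys_pgd_init()
still populates the ASI pgd all the way down to the 2M entries (just
with _PAGE_PRESENT clear), a later sensitivity change only rewrites an
existing PMD and never allocates. A hypothetical sketch of such a flip
using the stock x86 accessors; asi_set_pmd_present() is an illustrative
name, not something this series adds:

  /*
   * Hypothetical: flip one 2M ASI physmap entry in place. No pagetable
   * allocation is needed here because phys_pgd_init() already built the
   * path down to the PMD level.
   */
  static void asi_set_pmd_present(pmd_t *pmd, bool present)
  {
          pmdval_t val = pmd_val(*pmd);

          if (present)
                  val |= _PAGE_PRESENT;
          else
                  val &= ~_PAGE_PRESENT;
          set_pmd(pmd, __pmd(val));
  }

Combined with the VM_BUG_ON above, a pageblock is always a whole number
of such entries, so the flip stays allocation-free at pageblock
granularity.
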
--
2.49.0.rc1.451.g8f38331e32-goog