[PATCH v3 2/9] riscv: mm: Pre-allocate vmemmap/direct map PGD entries

From: Björn Töpel
Date: Tue May 21 2024 - 07:49:16 EST


From: Björn Töpel <bjorn@xxxxxxxxxxxx>

The RISC-V port copies the PGD table from init_mm/swapper_pg_dir to
all userland page tables, which means that if the PGD-level table is
changed, the other page tables have to be updated as well.
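
For reference, a simplified sketch of the copy that happens when a new
user page table is created (illustrative only, not the exact in-tree
pgd_alloc(); the helper name is made up):

  /*
   * Sketch: every new user page table inherits the kernel half of the
   * PGD from init_mm, so a later PGD-level change in the kernel tables
   * would not be visible here without extra synchronization.
   */
  static pgd_t *pgd_copy_sketch(struct mm_struct *mm)
  {
          pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

          if (!pgd)
                  return NULL;

          /* Clear the userland part... */
          memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
          /* ...and copy the kernel part from init_mm/swapper_pg_dir. */
          memcpy(pgd + USER_PTRS_PER_PGD,
                 init_mm.pgd + USER_PTRS_PER_PGD,
                 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

          return pgd;
  }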

Instead of having the PGD changes ripple out to all tables, the
synchronization can be avoided altogether by pre-allocating the PGD
entries/pages at boot.

This is currently done for the bpf/modules and vmalloc PGD regions.
Extend this scheme to the PGD regions touched by memory hotplugging.
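
A rough illustration of what the existing helper does (simplified from
the in-tree preallocate_pgd_pages_range(); the real function also
tracks which level failed for the panic message):

  /*
   * Sketch: walk the virtual range one PGD entry at a time and
   * populate the next level(s) in init_mm, so the PGD entries for the
   * area are fixed at boot and never change afterwards.
   */
  static void __init prealloc_pgd_range_sketch(unsigned long start,
                                               unsigned long end, const char *area)
  {
          unsigned long addr;

          for (addr = start; addr < end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
                  pgd_t *pgd = pgd_offset_k(addr);
                  p4d_t *p4d;
                  pud_t *pud;

                  p4d = p4d_alloc(&init_mm, pgd, addr);
                  pud = p4d ? pud_alloc(&init_mm, p4d, addr) : NULL;
                  if (!pud || !pmd_alloc(&init_mm, pud, addr))
                          panic("Failed to pre-allocate pages for %s area\n", area);
          }
  }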

Prepare the RISC-V port for memory hotplug by pre-allocating the
vmemmap/direct map entries at the PGD level. This will waste ~128 4K
pages when memory hotplugging is enabled in the kernel configuration.

The end of the direct map (PAGE_END) is defined in terms of
KASAN_SHADOW_START, so the KASAN_SHADOW_START/END definitions are
moved outside the CONFIG_KASAN guard to make them available regardless
of KASAN.

Reviewed-by: Alexandre Ghiti <alexghiti@xxxxxxxxxxxx>
Signed-off-by: Björn Töpel <bjorn@xxxxxxxxxxxx>
---
arch/riscv/include/asm/kasan.h | 4 ++--
arch/riscv/mm/init.c | 7 +++++++
2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/arch/riscv/include/asm/kasan.h b/arch/riscv/include/asm/kasan.h
index 0b85e363e778..e6a0071bdb56 100644
--- a/arch/riscv/include/asm/kasan.h
+++ b/arch/riscv/include/asm/kasan.h
@@ -6,8 +6,6 @@

#ifndef __ASSEMBLY__

-#ifdef CONFIG_KASAN
-
/*
* The following comment was copied from arm64:
* KASAN_SHADOW_START: beginning of the kernel virtual addresses.
@@ -34,6 +32,8 @@
*/
#define KASAN_SHADOW_START ((KASAN_SHADOW_END - KASAN_SHADOW_SIZE) & PGDIR_MASK)
#define KASAN_SHADOW_END MODULES_LOWEST_VADDR
+
+#ifdef CONFIG_KASAN
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)

void kasan_init(void);
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index b66f846e7634..c98010ede810 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -27,6 +27,7 @@

#include <asm/fixmap.h>
#include <asm/io.h>
+#include <asm/kasan.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
@@ -1488,10 +1489,16 @@ static void __init preallocate_pgd_pages_range(unsigned long start, unsigned lon
panic("Failed to pre-allocate %s pages for %s area\n", lvl, area);
}

+#define PAGE_END KASAN_SHADOW_START
+
void __init pgtable_cache_init(void)
{
preallocate_pgd_pages_range(VMALLOC_START, VMALLOC_END, "vmalloc");
if (IS_ENABLED(CONFIG_MODULES))
preallocate_pgd_pages_range(MODULES_VADDR, MODULES_END, "bpf/modules");
+ if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
+ preallocate_pgd_pages_range(VMEMMAP_START, VMEMMAP_END, "vmemmap");
+ preallocate_pgd_pages_range(PAGE_OFFSET, PAGE_END, "direct map");
+ }
}
#endif
--
2.40.1