[PATCH v3 07/14] x86: KASAN raw shadow memory PTE init
From: Maciej Wieczor-Retman
Date: Fri Apr 04 2025 - 09:17:32 EST
In KASAN's generic mode the default value in shadow memory is zero, so
shadow memory pages are simply allocated and zeroed during
initialization.
In KASAN's tag-based mode the default tag on the arm64 architecture is
0xFE, which marks memory that should not be accessed. On x86, where tags
are 4 bits wide instead of 8, that tag is 0xE, so during initialization
every byte of a shadow memory page should be filled with 0xE.
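As a rough illustration only (not the actual header definitions in this
series; the shadow_page variable below is hypothetical and
KASAN_SHADOW_INIT is assumed to expand to the invalid tag value),
initializing a freshly allocated shadow page amounts to:

	/*
	 * Invalid-tag fill value for the default shadow contents:
	 *   arm64, 8-bit tags: KASAN_SHADOW_INIT == 0xFE
	 *   x86,   4-bit tags: KASAN_SHADOW_INIT == 0xE
	 */
	memset(shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);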
Use memblock_alloc_try_nid_raw() instead of memblock_alloc_try_nid() so
the allocated memory is not zeroed and can instead be filled with the
KASAN invalid tag.
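A minimal sketch of the difference between the two allocators (the
shadow_page variable is hypothetical; the memblock calls mirror the ones
used in the hunk below):

	/* Zeroing variant: memory is returned already cleared to zero. */
	shadow_page = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);

	/*
	 * Raw variant: contents are left uninitialized, so the caller
	 * fills in the invalid tag itself instead of paying for a
	 * zeroing pass that would be overwritten anyway.
	 */
	shadow_page = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	memset(shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);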
Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman@xxxxxxxxx>
---
Changelog v2:
- Remove dense mode references, use memset() instead of kasan_poison().
arch/x86/mm/kasan_init_64.c | 19 ++++++++++++++++---
1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 0539efd0d216..e8a451cafc8c 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -34,6 +34,18 @@ static __init void *early_alloc(size_t size, int nid, bool should_panic)
return ptr;
}
+static __init void *early_raw_alloc(size_t size, int nid, bool should_panic)
+{
+ void *ptr = memblock_alloc_try_nid_raw(size, size,
+ __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+
+ if (!ptr && should_panic)
+ panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
+ (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));
+
+ return ptr;
+}
+
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
unsigned long end, int nid)
{
@@ -63,8 +75,9 @@ static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
if (!pte_none(*pte))
continue;
- p = early_alloc(PAGE_SIZE, nid, true);
- entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
+ p = early_raw_alloc(PAGE_SIZE, nid, true);
+ memset(p, KASAN_SHADOW_INIT, PAGE_SIZE);
+ entry = pfn_pte(PFN_DOWN(__pa_nodebug(p)), PAGE_KERNEL);
set_pte_at(&init_mm, addr, pte, entry);
} while (pte++, addr += PAGE_SIZE, addr != end);
}
@@ -436,7 +449,7 @@ void __init kasan_init(void)
* it may contain some garbage. Now we can clear and write protect it,
* since after the TLB flush no one should write to it.
*/
- memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+ memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
for (i = 0; i < PTRS_PER_PTE; i++) {
pte_t pte;
pgprot_t prot;
--
2.49.0