[PATCH] arm64: mm: handle ARM64_KERNEL_USES_PMD_MAPS in vmemmap_populate()

From: Kefeng Wang
Date: Mon Sep 19 2022 - 21:46:38 EST


Directly check ARM64_KERNEL_USES_PMD_MAPS to choose between base page
and PMD-level huge page mapping in vmemmap_populate(), instead of
carrying two #if/#else copies of the function. This simplifies the
code a bit.
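
With the patch applied, vmemmap_populate() reads roughly as sketched
below; the PMD mapping loop body is elided and only the control flow
touched by this patch is shown:

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                               struct vmem_altmap *altmap)
{
        unsigned long addr = start;
        unsigned long next;

        WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

        /* fall back to base pages when the kernel does not use PMD maps */
        if (!ARM64_KERNEL_USES_PMD_MAPS)
                return vmemmap_populate_basepages(start, end, node, altmap);

        do {
                next = pmd_addr_end(addr, end);

                /* ... walk pgd/p4d/pud and install a PMD-sized block ... */
        } while (addr = next, addr != end);

        return 0;
}

Since ARM64_KERNEL_USES_PMD_MAPS is a compile-time constant, the
compiler can still discard whichever branch is unused, so this should
amount to no functional change.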

Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
---
arch/arm64/mm/mmu.c | 13 ++++---------
1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5810eddfb48e..784afa9c60ac 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1180,14 +1180,6 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
}
#endif

-#if !ARM64_KERNEL_USES_PMD_MAPS
-int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
- struct vmem_altmap *altmap)
-{
- WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
- return vmemmap_populate_basepages(start, end, node, altmap);
-}
-#else /* !ARM64_KERNEL_USES_PMD_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
@@ -1199,6 +1191,10 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
pmd_t *pmdp;

WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
+
+ if (!ARM64_KERNEL_USES_PMD_MAPS)
+ return vmemmap_populate_basepages(start, end, node, altmap);
+
do {
next = pmd_addr_end(addr, end);

@@ -1232,7 +1228,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,

return 0;
}
-#endif /* !ARM64_KERNEL_USES_PMD_MAPS */

#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
--
2.35.3