[PATCH] mm, sparse: Fix boot on arm64

From: Kirill A. Shutemov
Date: Thu Nov 02 2017 - 10:02:29 EST


Since commit 83e3c48729d9 ("mm/sparsemem: Allocate mem_section at runtime for
CONFIG_SPARSEMEM_EXTREME=y") we allocate the mem_section array dynamically in
sparse_memory_present_with_active_regions(), but some architectures, like
arm64, don't call that routine to initialize sparsemem.

Let's move the initialization into memory_present(), which should cover all
architectures.
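
For context, here is roughly how SPARSEMEM_EXTREME looks up a section once
mem_section is allocated at runtime (a simplified sketch based on
include/linux/mmzone.h after 83e3c48729d9, not part of this patch):

	/* mem_section is now a plain pointer, allocated during boot */
	extern struct mem_section **mem_section;

	static inline struct mem_section *__nr_to_section(unsigned long nr)
	{
		/* NULL until the first memory_present() call allocates it */
		if (!mem_section)
			return NULL;
		if (!mem_section[SECTION_NR_TO_ROOT(nr)])
			return NULL;
		return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
	}

memory_present() dereferences mem_section when it populates the per-root
section arrays, so the root table has to exist before the first section is
marked present. That is why the allocation moves to the top of
memory_present() below.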

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Fixes: 83e3c48729d9 ("mm/sparsemem: Allocate mem_section at runtime for CONFIG_SPARSEMEM_EXTREME=y")
---
 mm/page_alloc.c | 10 ----------
 mm/sparse.c     | 10 ++++++++++
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8dfd13f724d9..77e4d3c5c57b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5646,16 +5646,6 @@ void __init sparse_memory_present_with_active_regions(int nid)
 	unsigned long start_pfn, end_pfn;
 	int i, this_nid;
 
-#ifdef CONFIG_SPARSEMEM_EXTREME
-	if (!mem_section) {
-		unsigned long size, align;
-
-		size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
-		align = 1 << (INTERNODE_CACHE_SHIFT);
-		mem_section = memblock_virt_alloc(size, align);
-	}
-#endif
-
 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
 		memory_present(this_nid, start_pfn, end_pfn);
 }
diff --git a/mm/sparse.c b/mm/sparse.c
index b00a97398795..d294148ba395 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -206,6 +206,16 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
 {
 	unsigned long pfn;
 
+#ifdef CONFIG_SPARSEMEM_EXTREME
+	if (unlikely(!mem_section)) {
+		unsigned long size, align;
+
+		size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
+		align = 1 << (INTERNODE_CACHE_SHIFT);
+		mem_section = memblock_virt_alloc(size, align);
+	}
+#endif
+
 	start &= PAGE_SECTION_MASK;
 	mminit_validate_memmodel_limits(&start, &end);
 	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
--
Kirill A. Shutemov