[RFC PATCH] mm: Add a new page type to mark unavailable pages

From: Baoquan He
Date: Fri May 29 2020 - 08:23:13 EST


Firstly, the last user of early_pfn_valid() was memmap_init_zone(), and that
code has since been optimized away, so remove the old definitions.

Secondly, add a new page type, PG_unavail, to mark unavailable pages.

Thirdly, add a new early_pfn_valid() so that init_unavailable_range()
can initialize those pages in memblock.reserved.
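
To make the intended split concrete, here is a small userspace model
(illustration only, not kernel code). The model_* helpers mirror the new
early_pfn_valid()/pfn_valid() pair below, with plain booleans standing in
for valid_section(), early_section(), pfn_section_valid() and the new
PageUnavail() test:

	#include <stdbool.h>
	#include <stdio.h>

	/* One section's state, plus one page's state within it. */
	struct model_section {
		bool valid;		/* valid_section(): memmap allocated */
		bool early;		/* early_section(): present at boot */
		bool subsection;	/* pfn_section_valid() for this pfn */
		bool page_unavail;	/* PageUnavail(): marked unavailable */
	};

	/* Mirrors the new early_pfn_valid(): only needs a memmap. */
	static int model_early_pfn_valid(const struct model_section *s)
	{
		return s->valid;
	}

	/* Mirrors the new pfn_valid(), built on early_pfn_valid(). */
	static int model_pfn_valid(const struct model_section *s)
	{
		if (!model_early_pfn_valid(s))
			return 0;
		return (s->early && s->page_unavail) || s->subsection;
	}

	int main(void)
	{
		/*
		 * A hole in an early section: its subsection is absent,
		 * but the memmap exists and init_unavailable_range()
		 * marked the page PG_unavail.
		 */
		struct model_section hole = {
			.valid = true, .early = true,
			.subsection = false, .page_unavail = true,
		};

		printf("early_pfn_valid()=%d pfn_valid()=%d\n",
		       model_early_pfn_valid(&hole), model_pfn_valid(&hole));
		return 0;
	}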

Signed-off-by: Baoquan He <bhe@xxxxxxxxxx>
---
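A note for reviewers, kept below the cut line so it stays out of the
commit message: PG_unavail follows the existing page_type encoding, in
which a type is "set" by clearing its bit below PAGE_TYPE_BASE, so the
field can never be mistaken for a positive _mapcount. The following
self-contained userspace demo shows that encoding; the two constants
match page-flags.h, while struct fake_page and the helpers are
hand-expanded stand-ins for what PAGE_TYPE_OPS(Unavail, unavail)
generates:

	#include <stdio.h>

	#define PAGE_TYPE_BASE	0xf0000000
	#define PG_unavail	0x00001000	/* the bit this patch adds */

	struct fake_page {
		unsigned int page_type;	/* shares storage with _mapcount */
	};

	/* PageType(): base bits all set, the type bit cleared. */
	static int PageUnavail(const struct fake_page *page)
	{
		return (page->page_type & (PAGE_TYPE_BASE | PG_unavail)) ==
			PAGE_TYPE_BASE;
	}

	static void __SetPageUnavail(struct fake_page *page)
	{
		page->page_type &= ~PG_unavail;	/* clear bit == set type */
	}

	static void __ClearPageUnavail(struct fake_page *page)
	{
		page->page_type |= PG_unavail;
	}

	int main(void)
	{
		/* The _mapcount == -1 pattern: no page type set yet. */
		struct fake_page page = { .page_type = 0xffffffff };

		printf("initial: %d\n", PageUnavail(&page));	/* 0 */
		__SetPageUnavail(&page);
		printf("set    : %d\n", PageUnavail(&page));	/* 1 */
		__ClearPageUnavail(&page);
		printf("cleared: %d\n", PageUnavail(&page));	/* 0 */
		return 0;
	}

This is also why init_unavailable_range() resets _mapcount to -1 after
zeroing the struct page: with page_type == 0, __SetPageUnavail() could
never make PageUnavail() return true.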
 arch/x86/include/asm/mmzone_32.h |  2 --
 include/linux/mmzone.h           | 21 +++++++++++++--------
 include/linux/page-flags.h      |  8 ++++++++
 mm/page_alloc.c                 |  4 +++-
 4 files changed, 24 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 73d8dd14dda2..4da45eff2942 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -49,8 +49,6 @@ static inline int pfn_valid(int pfn)
 	return 0;
 }
 
-#define early_pfn_valid(pfn)	pfn_valid((pfn))
-
 #endif /* CONFIG_DISCONTIGMEM */
 
 #endif /* _ASM_X86_MMZONE_32_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e8d7d0b0acf4..47d09be73dca 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1315,20 +1315,26 @@ static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
 #endif
 
 #ifndef CONFIG_HAVE_ARCH_PFN_VALID
-static inline int pfn_valid(unsigned long pfn)
+static inline int early_pfn_valid(unsigned long pfn)
 {
-	struct mem_section *ms;
-
 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
 		return 0;
-	ms = __nr_to_section(pfn_to_section_nr(pfn));
-	if (!valid_section(ms))
+	return valid_section(__pfn_to_section(pfn));
+}
+
+static inline int pfn_valid(unsigned long pfn)
+{
+	struct mem_section *ms;
+
+	if (!early_pfn_valid(pfn))
 		return 0;
+	ms = __pfn_to_section(pfn);
 	/*
 	 * Traditionally early sections always returned pfn_valid() for
 	 * the entire section-sized span.
 	 */
-	return early_section(ms) || pfn_section_valid(ms, pfn);
+	return (early_section(ms) && PageUnavail(pfn_to_page(pfn))) ||
+	       pfn_section_valid(ms, pfn);
 }
 #endif
 
@@ -1364,7 +1370,6 @@ static inline unsigned long next_present_section_nr(unsigned long section_nr)
 #define pfn_to_nid(pfn)		(0)
 #endif
 
-#define early_pfn_valid(pfn)	pfn_valid(pfn)
 void sparse_init(void);
 #else
 #define sparse_init()	do {} while (0)
@@ -1385,7 +1390,7 @@ struct mminit_pfnnid_cache {
 };
 
 #ifndef early_pfn_valid
-#define early_pfn_valid(pfn)	(1)
+#define early_pfn_valid(pfn)	pfn_valid(pfn)
 #endif
 
 void memory_present(int nid, unsigned long start, unsigned long end);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 222f6f7b2bb3..1c011008100c 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -740,6 +740,7 @@ PAGEFLAG_FALSE(DoubleMap)
 #define PG_kmemcg	0x00000200
 #define PG_table	0x00000400
 #define PG_guard	0x00000800
+#define PG_unavail	0x00001000
 
 #define PageType(page, flag)						\
 	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
@@ -796,6 +797,13 @@ PAGE_TYPE_OPS(Table, table)
  */
 PAGE_TYPE_OPS(Guard, guard)
 
+/*
+ * Mark pages which are unavailable to memory allocators after boot.
+ * They include pages reserved by firmware pre-boot or during boot, and
+ * holes which share the same section with usable RAM.
+ */
+PAGE_TYPE_OPS(Unavail, unavail)
+
 extern bool is_free_buddy_page(struct page *page);
 
 __PAGEFLAG(Isolated, isolated, PF_ANY);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 940cdce96864..e9ebef6ffbec 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6972,7 +6972,9 @@ static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
 		 * (in memblock.reserved but not in memblock.memory) will
 		 * get re-initialized via reserve_bootmem_region() later.
 		 */
-		__init_single_page(pfn_to_page(pfn), pfn, 0, 0);
+		mm_zero_struct_page(pfn_to_page(pfn));
+		page_mapcount_reset(pfn_to_page(pfn));
+		__SetPageUnavail(pfn_to_page(pfn));
 		__SetPageReserved(pfn_to_page(pfn));
 		pgcnt++;
 	}
--
2.17.2