[PATCH v8 03/12] mm/bootmem_info: Introduce free_bootmem_page helper
From: Muchun Song
Date: Wed Dec 09 2020 - 23:07:20 EST
Any memory allocated via the memblock allocator and not via the buddy
allocator will already be marked reserved in the memmap. For those pages,
we can call free_bootmem_page() to free them to the buddy allocator.
Because we want to free some vmemmap pages of HugeTLB pages back to the
buddy allocator, we can use this helper to do that in later patches.
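For illustration only, a caller in a later patch might return a range of
vmemmap pages (originally allocated from memblock) to the buddy allocator
roughly as follows. This is just a sketch; the helper name
free_vmemmap_range() and its address-range interface are assumptions and
not part of this series:

	/* Sketch only: walk a virtual address range and free each page. */
	static void free_vmemmap_range(unsigned long start, unsigned long end)
	{
		unsigned long addr;

		for (addr = start; addr < end; addr += PAGE_SIZE)
			free_bootmem_page(virt_to_page((void *)addr));
	}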
Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
include/linux/bootmem_info.h | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
index 4ed6dee1adc9..20a8b0df0c39 100644
--- a/include/linux/bootmem_info.h
+++ b/include/linux/bootmem_info.h
@@ -3,6 +3,7 @@
#define __LINUX_BOOTMEM_INFO_H
#include <linux/mmzone.h>
+#include <linux/mm.h>
/*
* Types for free bootmem stored in page->lru.next. These have to be in
@@ -22,6 +23,24 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
void get_page_bootmem(unsigned long info, struct page *page,
unsigned long type);
void put_page_bootmem(struct page *page);
+
+/*
+ * Any memory allocated via the memblock allocator and not via the
+ * buddy will be marked reserved already in the memmap. For those
+ * pages, we can call this function to free them to the buddy allocator.
+ */
+static inline void free_bootmem_page(struct page *page)
+{
+ unsigned long magic = (unsigned long)page->freelist;
+
+ /* bootmem page has the reserved flag set in reserve_bootmem_region() */
+ VM_WARN_ON(!PageReserved(page) || page_ref_count(page) != 2);
+
+ if (magic == SECTION_INFO || magic == MIX_SECTION_INFO)
+ put_page_bootmem(page);
+ else
+ WARN_ON(1);
+}
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
--
2.11.0