[PATCHv2] mm: introduce NR_BAD_PAGES and track them via kmemleak

From: zhaoyang.huang
Date: Tue Sep 20 2022 - 23:20:06 EST


From: Zhaoyang Huang <zhaoyang.huang@xxxxxxxxxx>

Bad pages can be introduced by an extra reference held on high-order pages or on compound
tail pages, which causes those pages to fail to go back to the allocator and to be left
behind as orphan pages. Account for them in a new counter and track them via kmemleak.

Signed-off-by: Zhaoyang Huang <zhaoyang.huang@xxxxxxxxxx>
---
Changes in v2: add accounting for bad pages
---
---
include/linux/mmzone.h | 1 +
mm/page_alloc.c | 13 ++++++++++---
mm/vmstat.c | 1 +
3 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e24b40c..11c1422 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -166,6 +166,7 @@ enum zone_stat_item {
NR_ZSPAGES, /* allocated in zsmalloc */
#endif
NR_FREE_CMA_PAGES,
+ NR_BAD_PAGES,
NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e5486d4..a3768c96 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1408,7 +1408,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
__memcg_kmem_uncharge_page(page, order);
reset_page_owner(page, order);
page_table_check_free(page, order);
- return false;
+ goto err;
}

/*
@@ -1442,7 +1442,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
if (check_free)
bad += check_free_page(page);
if (bad)
- return false;
+ goto err;

page_cpupid_reset_last(page);
page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
@@ -1486,6 +1486,11 @@ static __always_inline bool free_pages_prepare(struct page *page,
debug_pagealloc_unmap_pages(page, 1 << order);

return true;
+err:
+ __mod_zone_page_state(page_zone(page), NR_BAD_PAGES, 1 << order);
+ kmemleak_alloc(page_address(page), PAGE_SIZE << order, 1, GFP_KERNEL);
+ return false;
+
}

#ifdef CONFIG_DEBUG_VM
@@ -1587,8 +1592,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
count -= nr_pages;
pcp->count -= nr_pages;

- if (bulkfree_pcp_prepare(page))
+ if (bulkfree_pcp_prepare(page)) {
+ __mod_zone_page_state(page_zone(page), NR_BAD_PAGES, 1 << order);
continue;
+ }

/* MIGRATE_ISOLATE page should not go to pcplists */
VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 90af9a8..d391352 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1193,6 +1193,7 @@ int fragmentation_index(struct zone *zone, unsigned int order)
"nr_zspages",
#endif
"nr_free_cma",
+ "nr_bad_pages",

/* enum numa_stat_item counters */
#ifdef CONFIG_NUMA
--
1.9.1