[PATCH] mm/page_alloc: Avoid duplicate NR_FREE_PAGES updates in move_to_free_list()

From: Yajun Deng

Date: Fri Jan 09 2026 - 05:55:20 EST


In move_to_free_list(), when a page block changes its migration type,
we need to update free page counts for both the old and new types.
Originally, this was done by two calls to account_freepages(), which
updates NR_FREE_PAGES and also type-specific counters. However, this
causes NR_FREE_PAGES to be updated twice, while the net change is zero
in most cases.

This patch introduces a new function account_freepages_both() that
updates the statistics for both old and new migration types in one go.
It avoids the double update of NR_FREE_PAGES by computing the net change
only when the isolation status changes.

This optimization avoids the duplicate NR_FREE_PAGES update in
move_to_free_list() whenever the isolation status does not change.

Signed-off-by: Yajun Deng <yajun.deng@xxxxxxxxx>
---
mm/page_alloc.c | 37 ++++++++++++++++++++++++++++++-------
1 file changed, 30 insertions(+), 7 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ebfa07632995..e51d8bd7ab7d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -812,6 +812,16 @@ compaction_capture(struct capture_control *capc, struct page *page,
}
#endif /* CONFIG_COMPACTION */

+static inline void account_specific_freepages(struct zone *zone, int nr_pages,
+ int migratetype)
+{
+ if (is_migrate_cma(migratetype))
+ __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
+ else if (migratetype == MIGRATE_HIGHATOMIC)
+ WRITE_ONCE(zone->nr_free_highatomic,
+ zone->nr_free_highatomic + nr_pages);
+}
+
static inline void account_freepages(struct zone *zone, int nr_pages,
int migratetype)
{
@@ -822,11 +832,25 @@ static inline void account_freepages(struct zone *zone, int nr_pages,

__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);

- if (is_migrate_cma(migratetype))
- __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
- else if (migratetype == MIGRATE_HIGHATOMIC)
- WRITE_ONCE(zone->nr_free_highatomic,
- zone->nr_free_highatomic + nr_pages);
+ account_specific_freepages(zone, nr_pages, migratetype);
+}
+
+static inline void account_freepages_both(struct zone *zone, int nr_pages,
+ int old_mt, int new_mt)
+{
+ bool old_isolated = is_migrate_isolate(old_mt);
+ bool new_isolated = is_migrate_isolate(new_mt);
+
+ lockdep_assert_held(&zone->lock);
+
+ if (old_isolated != new_isolated)
+ __mod_zone_page_state(zone, NR_FREE_PAGES,
+ old_isolated ? nr_pages : -nr_pages);
+
+ if (!old_isolated)
+ account_specific_freepages(zone, -nr_pages, old_mt);
+ if (!new_isolated)
+ account_specific_freepages(zone, nr_pages, new_mt);
}

/* Used for pages not on another list */
@@ -869,8 +893,7 @@ static inline void move_to_free_list(struct page *page, struct zone *zone,

list_move_tail(&page->buddy_list, &area->free_list[new_mt]);

- account_freepages(zone, -nr_pages, old_mt);
- account_freepages(zone, nr_pages, new_mt);
+ account_freepages_both(zone, nr_pages, old_mt, new_mt);

if (order >= pageblock_order &&
is_migrate_isolate(old_mt) != is_migrate_isolate(new_mt)) {
--
2.34.1