[PATCH 2/4] mm/vmstat: use zone_stat_add_folio/sub_folio for folio_nr_pages operations
From: Ye Liu <liuye@xxxxxxxxxx>
Date: Tue Apr 14 2026 - 05:16:38 EST
Replace zone_stat_mod_folio() calls that pass folio_nr_pages(folio) or
-folio_nr_pages(folio) as the third argument with the equivalent
zone_stat_add_folio() and zone_stat_sub_folio() helpers respectively.
This makes the call sites more concise and drops the now-redundant
size argument.

No functional change intended.
Signed-off-by: Ye Liu <liuye@xxxxxxxxxx>
---
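Note for reviewers: zone_stat_add_folio() and zone_stat_sub_folio() are
the existing folio stat helpers in include/linux/vmstat.h. Assuming they
keep their current wrapper form, they expand to the same
mod_zone_page_state() call the open-coded sites made, roughly:

    /* sketch: assumed wrapper form of the helpers in include/linux/vmstat.h */
    #define zone_stat_add_folio(folio, item) \
            mod_zone_page_state(folio_zone(folio), (item), folio_nr_pages(folio))
    #define zone_stat_sub_folio(folio, item) \
            mod_zone_page_state(folio_zone(folio), (item), -folio_nr_pages(folio))

so e.g. zone_stat_mod_folio(folio, NR_MLOCK, nr_pages) and
zone_stat_add_folio(folio, NR_MLOCK) end up in the same place. The
nr_pages/nr locals are kept where the vm event counters still need them.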
mm/mlock.c | 4 ++--
mm/page-writeback.c | 8 ++++----
mm/page_alloc.c | 2 +-
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index 8c227fefa2df..0cafcb2f4e8d 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -249,7 +249,7 @@ void mlock_folio(struct folio *folio)
 	if (!folio_test_set_mlocked(folio)) {
 		int nr_pages = folio_nr_pages(folio);
-		zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
+		zone_stat_add_folio(folio, NR_MLOCK);
 		__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
 	}
@@ -273,7 +273,7 @@ void mlock_new_folio(struct folio *folio)
 	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
 	folio_set_mlocked(folio);
-	zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
+	zone_stat_add_folio(folio, NR_MLOCK);
 	__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
 	folio_get(folio);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 87e9ea41313a..6f9b7b081ab7 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2647,7 +2647,7 @@ void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
 		long nr = folio_nr_pages(folio);
 		lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
-		zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
+		zone_stat_sub_folio(folio, NR_ZONE_WRITE_PENDING);
 		wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
 		task_io_account_cancelled_write(nr * PAGE_SIZE);
 	}
@@ -2916,7 +2916,7 @@ bool folio_clear_dirty_for_io(struct folio *folio)
 		if (folio_test_clear_dirty(folio)) {
 			long nr = folio_nr_pages(folio);
 			lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, -nr);
-			zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
+			zone_stat_sub_folio(folio, NR_ZONE_WRITE_PENDING);
 			wb_stat_mod(wb, WB_RECLAIMABLE, -nr);
 			ret = true;
 		}
@@ -2980,7 +2980,7 @@ bool __folio_end_writeback(struct folio *folio)
 	}
 	lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
-	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
+	zone_stat_sub_folio(folio, NR_ZONE_WRITE_PENDING);
 	node_stat_add_folio(folio, NR_WRITTEN);
 	return ret;
@@ -3032,7 +3032,7 @@ void __folio_start_writeback(struct folio *folio, bool keep_write)
 	}
 	lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
-	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
+	zone_stat_add_folio(folio, NR_ZONE_WRITE_PENDING);
 	access_ret = arch_make_folio_accessible(folio);
 	/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 65e205111553..a81ceb4181ea 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1327,7 +1327,7 @@ __always_inline bool __free_pages_prepare(struct page *page,
 		long nr_pages = folio_nr_pages(folio);
 		__folio_clear_mlocked(folio);
-		zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
+		zone_stat_sub_folio(folio, NR_MLOCK);
 		count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
 	}
--
2.43.0