[PATCH 1/4] mm/vmstat: use node_stat_add_folio/sub_folio for folio_nr_pages operations

From: Ye Liu

Date: Tue Apr 14 2026 - 05:16:27 EST


From: Ye Liu <liuye@xxxxxxxxxx>

Replace node_stat_mod_folio() calls that pass folio_nr_pages(folio) or
-folio_nr_pages(folio) as the third argument with the more concise
node_stat_add_folio() and node_stat_sub_folio() helpers, respectively.

This makes the call sites more readable and drops a redundant argument,
since both helpers derive the page count from the folio themselves.

Signed-off-by: Ye Liu <liuye@xxxxxxxxxx>
---
fs/nfs/internal.h | 2 +-
fs/nfs/write.c | 2 +-
mm/compaction.c | 5 ++---
mm/gup.c | 5 ++---
mm/khugepaged.c | 10 ++++------
mm/mempolicy.c | 5 ++---
mm/migrate.c | 12 +++++-------
mm/page-writeback.c | 4 ++--
mm/swap_state.c | 4 ++--
9 files changed, 21 insertions(+), 28 deletions(-)

diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index fc5456377160..f5c52a2d2a1f 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -858,7 +858,7 @@ static inline void nfs_folio_mark_unstable(struct folio *folio,
/* This page is really still in write-back - just that the
* writeback is happening on the server now.
*/
- node_stat_mod_folio(folio, NR_WRITEBACK, nr);
+ node_stat_add_folio(folio, NR_WRITEBACK);
bdi_wb_stat_mod(inode, WB_WRITEBACK, nr);
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index cc02b57de3c7..a8700824a61b 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -871,7 +871,7 @@ static void nfs_folio_clear_commit(struct folio *folio)
if (folio) {
long nr = folio_nr_pages(folio);

- node_stat_mod_folio(folio, NR_WRITEBACK, -nr);
+ node_stat_sub_folio(folio, NR_WRITEBACK);
bdi_wb_stat_mod(folio->mapping->host, WB_WRITEBACK, -nr);
}
}
diff --git a/mm/compaction.c b/mm/compaction.c
index 3648ce22c807..d7ce622aeed1 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1215,9 +1215,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,

/* Successfully isolated */
lruvec_del_folio(lruvec, folio);
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
+ node_stat_add_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));

isolate_success:
list_add(&folio->lru, &cc->migratepages);
diff --git a/mm/gup.c b/mm/gup.c
index ad9ded39609c..2cb2efa20bff 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2304,9 +2304,8 @@ static unsigned long collect_longterm_unpinnable_folios(
continue;

list_add_tail(&folio->lru, movable_folio_list);
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
+ node_stat_add_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));
}

return collected;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index b8452dbdb043..f662de753305 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -502,9 +502,8 @@ void __khugepaged_exit(struct mm_struct *mm)

static void release_pte_folio(struct folio *folio)
{
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- -folio_nr_pages(folio));
+ node_stat_sub_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));
folio_unlock(folio);
folio_putback_lru(folio);
}
@@ -650,9 +649,8 @@ static enum scan_result __collapse_huge_page_isolate(struct vm_area_struct *vma,
result = SCAN_DEL_PAGE_LRU;
goto out;
}
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
+ node_stat_add_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4e4421b22b59..1c413f66b35f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1259,9 +1259,8 @@ static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist,
if ((flags & MPOL_MF_MOVE_ALL) || !folio_maybe_mapped_shared(folio)) {
if (folio_isolate_lru(folio)) {
list_add_tail(&folio->lru, foliolist);
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
+ node_stat_add_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));
} else {
/*
* Non-movable folio may reach here. And, there may be
diff --git a/mm/migrate.c b/mm/migrate.c
index 8a64291ab5b4..dc8cfee37a70 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -268,8 +268,8 @@ void putback_movable_pages(struct list_head *l)
if (unlikely(page_has_movable_ops(&folio->page))) {
putback_movable_ops_page(&folio->page);
} else {
- node_stat_mod_folio(folio, NR_ISOLATED_ANON +
- folio_is_file_lru(folio), -folio_nr_pages(folio));
+ node_stat_sub_folio(folio, NR_ISOLATED_ANON +
+ folio_is_file_lru(folio));
folio_putback_lru(folio);
}
}
@@ -2272,9 +2272,8 @@ static int __add_folio_for_migration(struct folio *folio, int node,
return 1;
} else if (folio_isolate_lru(folio)) {
list_add_tail(&folio->lru, pagelist);
- node_stat_mod_folio(folio,
- NR_ISOLATED_ANON + folio_is_file_lru(folio),
- folio_nr_pages(folio));
+ node_stat_add_folio(folio,
+ NR_ISOLATED_ANON + folio_is_file_lru(folio));
return 1;
}
return -EBUSY;
@@ -2726,8 +2725,7 @@ int migrate_misplaced_folio_prepare(struct folio *folio,
if (!folio_isolate_lru(folio))
return -EAGAIN;

- node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
- nr_pages);
+ node_stat_add_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio));
return 0;
}

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 833f743f309f..87e9ea41313a 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2750,7 +2750,7 @@ bool folio_redirty_for_writepage(struct writeback_control *wbc,

wb = unlocked_inode_to_wb_begin(inode, &cookie);
current->nr_dirtied -= nr;
- node_stat_mod_folio(folio, NR_DIRTIED, -nr);
+ node_stat_sub_folio(folio, NR_DIRTIED);
wb_stat_mod(wb, WB_DIRTIED, -nr);
unlocked_inode_to_wb_end(inode, &cookie);
}
@@ -2981,7 +2981,7 @@ bool __folio_end_writeback(struct folio *folio)

lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
- node_stat_mod_folio(folio, NR_WRITTEN, nr);
+ node_stat_add_folio(folio, NR_WRITTEN);

return ret;
}
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 1415a5c54a43..d08e923c9979 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -160,7 +160,7 @@ void __swap_cache_add_folio(struct swap_cluster_info *ci,
folio_set_swapcache(folio);
folio->swap = entry;

- node_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
+ node_stat_add_folio(folio, NR_FILE_PAGES);
lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr_pages);
}

@@ -265,7 +265,7 @@ void __swap_cache_del_folio(struct swap_cluster_info *ci, struct folio *folio,

folio->swap.val = 0;
folio_clear_swapcache(folio);
- node_stat_mod_folio(folio, NR_FILE_PAGES, -nr_pages);
+ node_stat_sub_folio(folio, NR_FILE_PAGES);
lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr_pages);

if (!folio_swapped) {
--
2.43.0