[PATCH 08/11] mm/ksm: Convert stable_tree_insert to use folio

From: alexs
Date: Wed Mar 20 2024 - 03:39:16 EST


From: "Alex Shi (tencent)" <alexs@xxxxxxxxxx>

The KSM stable tree only stores single pages, so convert stable_tree_insert()
and its caller to use a folio and save a few compound_head() calls.

Signed-off-by: Alex Shi (tencent) <alexs@xxxxxxxxxx>
Cc: Izik Eidus <izik.eidus@xxxxxxxxxxxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: Hugh Dickins <hughd@xxxxxxxxxx>
Cc: Chris Wright <chrisw@xxxxxxxxxxxx>
---
mm/ksm.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
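
Note below the cut line, not part of the commit message: a minimal sketch of
the conversion pattern this patch follows (illustrative only, not quoted from
the hunks; the kpage/kfolio names are just examples). The caller resolves the
head page once via page_folio(), so the folio-taking function needs no
compound_head() lookups of its own:

	struct folio *kfolio = page_folio(kpage);	/* one compound_head() here */

	stable_tree_insert(kfolio);			/* none needed inside */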

diff --git a/mm/ksm.c b/mm/ksm.c
index 648fa695424b..71d1a52f344d 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2062,7 +2062,7 @@ static struct page *stable_tree_search(struct page *page)
* This function returns the stable tree node just allocated on success,
* NULL otherwise.
*/
-static struct ksm_stable_node *stable_tree_insert(struct page *kpage)
+static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
{
int nid;
unsigned long kpfn;
@@ -2072,7 +2072,7 @@ static struct ksm_stable_node *stable_tree_insert(struct page *kpage)
struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
bool need_chain = false;

- kpfn = page_to_pfn(kpage);
+ kpfn = folio_pfn(kfolio);
nid = get_kpfn_nid(kpfn);
root = root_stable_tree + nid;
again:
@@ -2080,13 +2080,13 @@ static struct ksm_stable_node *stable_tree_insert(struct page *kpage)
new = &root->rb_node;

while (*new) {
- struct page *tree_page;
+ struct folio *tree_folio;
int ret;

cond_resched();
stable_node = rb_entry(*new, struct ksm_stable_node, node);
stable_node_any = NULL;
- tree_page = chain(&stable_node_dup, stable_node, root);
+ tree_folio = chain(&stable_node_dup, stable_node, root);
if (!stable_node_dup) {
/*
* Either all stable_node dups were full in
@@ -2108,11 +2108,11 @@ static struct ksm_stable_node *stable_tree_insert(struct page *kpage)
* write protected at all times. Any will work
* fine to continue the walk.
*/
- tree_page = get_ksm_page(stable_node_any,
+ tree_folio = get_ksm_page(stable_node_any,
GET_KSM_PAGE_NOLOCK);
}
VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
- if (!tree_page) {
+ if (!tree_folio) {
/*
* If we walked over a stale stable_node,
* get_ksm_page() will call rb_erase() and it
@@ -2125,8 +2125,8 @@ static struct ksm_stable_node *stable_tree_insert(struct page *kpage)
goto again;
}

- ret = memcmp_pages(kpage, tree_page);
- put_page(tree_page);
+ ret = memcmp_pages(&kfolio->page, &tree_folio->page);
+ folio_put(tree_folio);

parent = *new;
if (ret < 0)
@@ -2145,7 +2145,7 @@ static struct ksm_stable_node *stable_tree_insert(struct page *kpage)

INIT_HLIST_HEAD(&stable_node_dup->hlist);
stable_node_dup->kpfn = kpfn;
- set_page_stable_node(kpage, stable_node_dup);
+ set_page_stable_node(&kfolio->page, stable_node_dup);
stable_node_dup->rmap_hlist_len = 0;
DO_NUMA(stable_node_dup->nid = nid);
if (!need_chain) {
@@ -2423,7 +2423,7 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite
* node in the stable tree and add both rmap_items.
*/
lock_page(kpage);
- stable_node = stable_tree_insert(kpage);
+ stable_node = stable_tree_insert(page_folio(kpage));
if (stable_node) {
stable_tree_append(tree_rmap_item, stable_node,
false);
--
2.43.0