[RFC PATCH 4/5] mm: add page implyreclaim flag
From: Lincheng Yang
Date: Sun Oct 08 2023 - 06:00:33 EST
Add an implyreclaim flag to mark pages that were reclaimed on user
advice (e.g. MADV_PAGEOUT). If the number of workingset restores seen
for these implyreclaim pages exceeds workingset_restore_limit, they are
frequently used and are treated as hot pages. Pages that were not
reclaimed on user advice are treated as hot as soon as any restore has
been observed, since that already indicates frequent use.
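The resulting check in swap_folio_hot() is roughly the following
(sketch only; delta is the number of anon workingset restores in the
lruvec since the last reclaim cycle snapshot):

	if (folio_test_clear_implyreclaim(folio))
		hot = delta > workingset_restore_limit;
	else
		hot = delta != 0;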
Signed-off-by: Lincheng Yang <lincheng.yang@xxxxxxxxxxxxx>
---
include/linux/mmzone.h | 1 +
include/linux/page-flags.h | 3 ++
include/trace/events/mmflags.h | 3 +-
mm/madvise.c | 1 +
mm/migrate.c | 2 ++
mm/swapfile.c | 64 +++++++++++++++++++++++++++++++++-
mm/vmscan.c | 3 ++
7 files changed, 75 insertions(+), 2 deletions(-)
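The restore threshold can be tuned at runtime through the new proc
interface added below (the value 8 here is purely illustrative):

	echo 8 > /proc/workingset_restore_limit
	cat /proc/workingset_restore_limit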
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5e50b78d58ea..b280e6b0015a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -626,6 +626,7 @@ struct lruvec {
atomic_long_t nonresident_age;
/* Refaults at the time of last reclaim cycle */
unsigned long refaults[ANON_AND_FILE];
+ unsigned long restores[ANON_AND_FILE];
/* Various lruvec state flags (enum lruvec_flags) */
unsigned long flags;
#ifdef CONFIG_LRU_GEN
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index a2c83c0100aa..4a1278851d4b 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -138,6 +138,7 @@ enum pageflags {
#endif
PG_hot,
PG_cold,
+ PG_implyreclaim,
__NR_PAGEFLAGS,
PG_readahead = PG_reclaim,
@@ -482,6 +483,8 @@ PAGEFLAG(Hot, hot, PF_HEAD)
TESTCLEARFLAG(Hot, hot, PF_HEAD)
PAGEFLAG(Cold, cold, PF_HEAD)
TESTCLEARFLAG(Cold, cold, PF_HEAD)
+PAGEFLAG(Implyreclaim, implyreclaim, PF_HEAD)
+ TESTCLEARFLAG(Implyreclaim, implyreclaim, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index f266f92c41c6..ee014f955aef 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -120,7 +120,8 @@
DEF_PAGEFLAG_NAME(swapbacked), \
DEF_PAGEFLAG_NAME(unevictable), \
DEF_PAGEFLAG_NAME(hot), \
- DEF_PAGEFLAG_NAME(cold) \
+ DEF_PAGEFLAG_NAME(cold), \
+ DEF_PAGEFLAG_NAME(implyreclaim) \
IF_HAVE_PG_MLOCK(mlocked) \
IF_HAVE_PG_UNCACHED(uncached) \
IF_HAVE_PG_HWPOISON(hwpoison) \
diff --git a/mm/madvise.c b/mm/madvise.c
index a5c19bb3f392..199b48dfa8c5 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -518,6 +518,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
} else {
list_add(&folio->lru, &folio_list);
folio_set_cold(folio);
+ folio_set_implyreclaim(folio);
}
}
} else
diff --git a/mm/migrate.c b/mm/migrate.c
index 9f97744bb0a8..691b4f7bf1ae 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -565,6 +565,8 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
folio_set_hot(newfolio);
if (folio_test_cold(folio))
folio_set_cold(newfolio);
+ if (folio_test_implyreclaim(folio))
+ folio_set_implyreclaim(newfolio);
if (folio_test_checked(folio))
folio_set_checked(newfolio);
/*
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 5378f70d330d..629e6a291e9b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -105,6 +105,8 @@ static atomic_t proc_poll_event = ATOMIC_INIT(0);
atomic_t nr_rotate_swap = ATOMIC_INIT(0);
+static unsigned int workingset_restore_limit;
+
static struct swap_info_struct *swap_type_to_swap_info(int type)
{
if (type >= MAX_SWAPFILES)
@@ -120,11 +122,33 @@ static inline bool swap_info_hot(struct swap_info_struct *si)
bool swap_folio_hot(struct folio *folio, bool hotness)
{
+ struct lruvec *lruvec;
+ struct mem_cgroup *memcg;
+ unsigned long restores;
+ int delta;
+
if (hotness)
return true;
- if (folio_test_swapbacked(folio) && folio_test_hot(folio))
+ if (folio_test_swapbacked(folio) && folio_test_hot(folio)) {
+ folio_clear_implyreclaim(folio);
+ return true;
+ }
+
+ rcu_read_lock(); /* folio_memcg_rcu() must be called under rcu_read_lock() */
+ memcg = folio_memcg_rcu(folio);
+ rcu_read_unlock();
+
+ lruvec = mem_cgroup_lruvec(memcg, folio_pgdat(folio));
+ restores = lruvec_page_state(lruvec, WORKINGSET_RESTORE_ANON);
+ delta = restores - lruvec->restores[WORKINGSET_ANON];
+
+ if (folio_test_clear_implyreclaim(folio)) {
+ if (delta > workingset_restore_limit)
+ return true;
+ } else if (delta) {
return true;
+ }
if (folio_test_cold(folio))
folio_clear_cold(folio);
@@ -2715,9 +2739,47 @@ static const struct proc_ops swaps_proc_ops = {
.proc_poll = swaps_poll,
};
+static ssize_t workingset_restore_limit_write(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *pos)
+{
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint_from_user(ubuf, count, 10, &val);
+ if (ret)
+ return ret;
+
+ workingset_restore_limit = val;
+
+ return count;
+}
+
+static int workingset_restore_limit_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "%u\n", workingset_restore_limit);
+
+ return 0;
+}
+
+static int workingset_restore_limit_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, workingset_restore_limit_show, NULL);
+}
+
+static const struct proc_ops workingset_restore_limit_fops = {
+ .proc_open = workingset_restore_limit_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+ .proc_write = workingset_restore_limit_write,
+};
+
static int __init procswaps_init(void)
{
proc_create("swaps", 0, NULL, &swaps_proc_ops);
+ proc_create("workingset_restore_limit", 0644, NULL, &workingset_restore_limit_fops);
+
return 0;
}
__initcall(procswaps_init);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 11d175d9fe0c..8107f8d86d7f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -6805,6 +6805,9 @@ static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
target_lruvec->refaults[WORKINGSET_ANON] = refaults;
refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE);
target_lruvec->refaults[WORKINGSET_FILE] = refaults;
+
+ refaults = lruvec_page_state(target_lruvec, WORKINGSET_RESTORE_ANON);
+ target_lruvec->restores[WORKINGSET_ANON] = refaults;
}
/*
--
2.34.1