[RFC v10 12/13] mm/damon/paddr: Separate commonly usable functions
From: SeongJae Park
Date: Wed Dec 16 2020 - 04:48:28 EST
From: SeongJae Park <sjpark@xxxxxxxxx>
This commit moves the functions of the default physical address space
monitoring primitives that are commonly usable from other use cases,
such as page granularity idleness monitoring, to prmtv-common, and
declares them in its header.
Signed-off-by: SeongJae Park <sjpark@xxxxxxxxx>
---
mm/damon/paddr.c | 122 ----------------------------------------
mm/damon/prmtv-common.c | 138 ++++++++++++++++++++++++++++++++++++++++
mm/damon/prmtv-common.h | 4 ++
3 files changed, 142 insertions(+), 122 deletions(-)
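
As context for reviewers, below is a minimal sketch of how a page
granularity idleness monitor could drive the now-shared functions. It is
not part of this patch, and the 'example_*' names are illustrative only;
the sketch assumes nothing beyond the two declarations this patch adds
to prmtv-common.h (plus PFN_PHYS() from <linux/pfn.h>).

  /* Prepare an idleness check: age all mappings of the page. */
  static void example_prepare_idle_check(unsigned long pfn)
  {
          damon_pa_mkold(PFN_PHYS(pfn));
  }

  /* Check the result: the page is idle iff no mapping was referenced. */
  static bool example_page_is_idle(unsigned long pfn)
  {
          unsigned long page_sz;

          return !damon_pa_young(PFN_PHYS(pfn), &page_sz);
  }
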
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index b120f672cc57..143ddc0e5917 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -19,69 +19,6 @@
* of the primitives.
*/
-/*
- * Get a page by pfn if it is in the LRU list. Otherwise, returns NULL.
- *
- * The body of this function is stolen from 'page_idle_get_page()'. We steal
- * rather than reuse it because the code is quite simple.
- */
-static struct page *damon_pa_get_page(unsigned long pfn)
-{
- struct page *page = pfn_to_online_page(pfn);
- pg_data_t *pgdat;
-
- if (!page || !PageLRU(page) ||
- !get_page_unless_zero(page))
- return NULL;
-
- pgdat = page_pgdat(page);
- spin_lock_irq(&pgdat->lru_lock);
- if (unlikely(!PageLRU(page))) {
- put_page(page);
- page = NULL;
- }
- spin_unlock_irq(&pgdat->lru_lock);
- return page;
-}
-
-static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
- unsigned long addr, void *arg)
-{
- damon_va_mkold(vma->vm_mm, addr);
- return true;
-}
-
-static void damon_pa_mkold(unsigned long paddr)
-{
- struct page *page = damon_pa_get_page(PHYS_PFN(paddr));
- struct rmap_walk_control rwc = {
- .rmap_one = __damon_pa_mkold,
- .anon_lock = page_lock_anon_vma_read,
- };
- bool need_lock;
-
- if (!page)
- return;
-
- if (!page_mapped(page) || !page_rmapping(page)) {
- set_page_idle(page);
- put_page(page);
- return;
- }
-
- need_lock = !PageAnon(page) || PageKsm(page);
- if (need_lock && !trylock_page(page)) {
- put_page(page);
- return;
- }
-
- rmap_walk(page, &rwc);
-
- if (need_lock)
- unlock_page(page);
- put_page(page);
-}
-
static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
struct damon_region *r)
{
@@ -101,65 +38,6 @@ void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
}
}
-struct damon_pa_access_chk_result {
- unsigned long page_sz;
- bool accessed;
-};
-
-static bool damon_pa_accessed(struct page *page, struct vm_area_struct *vma,
- unsigned long addr, void *arg)
-{
- struct damon_pa_access_chk_result *result = arg;
-
- result->accessed = damon_va_young(vma->vm_mm, addr, &result->page_sz);
-
- /* If accessed, stop walking */
- return !result->accessed;
-}
-
-static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
-{
- struct page *page = damon_pa_get_page(PHYS_PFN(paddr));
- struct damon_pa_access_chk_result result = {
- .page_sz = PAGE_SIZE,
- .accessed = false,
- };
- struct rmap_walk_control rwc = {
- .arg = &result,
- .rmap_one = damon_pa_accessed,
- .anon_lock = page_lock_anon_vma_read,
- };
- bool need_lock;
-
- if (!page)
- return false;
-
- if (!page_mapped(page) || !page_rmapping(page)) {
- if (page_is_idle(page))
- result.accessed = false;
- else
- result.accessed = true;
- put_page(page);
- goto out;
- }
-
- need_lock = !PageAnon(page) || PageKsm(page);
- if (need_lock && !trylock_page(page)) {
- put_page(page);
- return false;
- }
-
- rmap_walk(page, &rwc);
-
- if (need_lock)
- unlock_page(page);
- put_page(page);
-
-out:
- *page_sz = result.page_sz;
- return result.accessed;
-}
-
/*
* Check whether the region was accessed after the last preparation
*
diff --git a/mm/damon/prmtv-common.c b/mm/damon/prmtv-common.c
index 6cdb96cbc9ef..6c2e760e086c 100644
--- a/mm/damon/prmtv-common.c
+++ b/mm/damon/prmtv-common.c
@@ -102,3 +102,141 @@ bool damon_va_young(struct mm_struct *mm, unsigned long addr,
return young;
}
+
+/*
+ * Get a page by pfn if it is in the LRU list. Otherwise, returns NULL.
+ *
+ * The body of this function is stolen from 'page_idle_get_page()'. We steal
+ * rather than reuse it because the code is quite simple.
+ */
+static struct page *damon_pa_get_page(unsigned long pfn)
+{
+ struct page *page = pfn_to_online_page(pfn);
+ pg_data_t *pgdat;
+
+ if (!page || !PageLRU(page) ||
+ !get_page_unless_zero(page))
+ return NULL;
+
+ pgdat = page_pgdat(page);
+ spin_lock_irq(&pgdat->lru_lock);
+ if (unlikely(!PageLRU(page))) {
+ put_page(page);
+ page = NULL;
+ }
+ spin_unlock_irq(&pgdat->lru_lock);
+ return page;
+}
+
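+/* rmap walk callback of damon_pa_mkold(): age one mapping of the page */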
+static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
+ unsigned long addr, void *arg)
+{
+ damon_va_mkold(vma->vm_mm, addr);
+ return true;
+}
+
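+/*
+ * Mark all mappings of the page of the given physical address as 'old'
+ * (not accessed), so that later damon_pa_young() calls can catch accesses
+ * made after this call.
+ */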
+void damon_pa_mkold(unsigned long paddr)
+{
+ struct page *page = damon_pa_get_page(PHYS_PFN(paddr));
+ struct rmap_walk_control rwc = {
+ .rmap_one = __damon_pa_mkold,
+ .anon_lock = page_lock_anon_vma_read,
+ };
+ bool need_lock;
+
+ if (!page)
+ return;
+
+ if (!page_mapped(page) || !page_rmapping(page)) {
+ set_page_idle(page);
+ put_page(page);
+ return;
+ }
+
+ need_lock = !PageAnon(page) || PageKsm(page);
+ if (need_lock && !trylock_page(page)) {
+ put_page(page);
+ return;
+ }
+
+ rmap_walk(page, &rwc);
+
+ if (need_lock)
+ unlock_page(page);
+ put_page(page);
+}
+
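+/* Container of the result of the rmap walk done by damon_pa_young() */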
+struct damon_pa_access_chk_result {
+ unsigned long page_sz;
+ bool accessed;
+};
+
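+/*
+ * rmap walk callback of damon_pa_young(): check whether one mapping of the
+ * page was accessed, and stop the walk as soon as an access is found.
+ */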
+static bool damon_pa_accessed(struct page *page, struct vm_area_struct *vma,
+ unsigned long addr, void *arg)
+{
+ struct damon_pa_access_chk_result *result = arg;
+
+ result->accessed = damon_va_young(vma->vm_mm, addr, &result->page_sz);
+
+ /* If accessed, stop walking */
+ return !result->accessed;
+}
+
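+/*
+ * Check whether the page of the given physical address was accessed since
+ * the last damon_pa_mkold() call for it.  The size of the page that the
+ * access was checked for is stored in @page_sz.
+ */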
+bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
+{
+ struct page *page = damon_pa_get_page(PHYS_PFN(paddr));
+ struct damon_pa_access_chk_result result = {
+ .page_sz = PAGE_SIZE,
+ .accessed = false,
+ };
+ struct rmap_walk_control rwc = {
+ .arg = &result,
+ .rmap_one = damon_pa_accessed,
+ .anon_lock = page_lock_anon_vma_read,
+ };
+ bool need_lock;
+
+ if (!page)
+ return false;
+
+ if (!page_mapped(page) || !page_rmapping(page)) {
+ if (page_is_idle(page))
+ result.accessed = false;
+ else
+ result.accessed = true;
+ put_page(page);
+ goto out;
+ }
+
+ need_lock = !PageAnon(page) || PageKsm(page);
+ if (need_lock && !trylock_page(page)) {
+ put_page(page);
+ return false;
+ }
+
+ rmap_walk(page, &rwc);
+
+ if (need_lock)
+ unlock_page(page);
+ put_page(page);
+
+out:
+ *page_sz = result.page_sz;
+ return result.accessed;
+}
diff --git a/mm/damon/prmtv-common.h b/mm/damon/prmtv-common.h
index a66a6139b4fc..fbe9452bd040 100644
--- a/mm/damon/prmtv-common.h
+++ b/mm/damon/prmtv-common.h
@@ -10,6 +10,7 @@
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/random.h>
+#include <linux/rmap.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
@@ -19,3 +20,6 @@
void damon_va_mkold(struct mm_struct *mm, unsigned long addr);
bool damon_va_young(struct mm_struct *mm, unsigned long addr,
unsigned long *page_sz);
+
+void damon_pa_mkold(unsigned long paddr);
+bool damon_pa_young(unsigned long paddr, unsigned long *page_sz);
--
2.17.1