[PATCH mm-unstable 2/5] mm/mempolicy: Convert queue_pages_pte_range() to queue_folios_pte_range()
From: Vishal Moola (Oracle)
Date: Wed Jan 18 2023 - 18:22:35 EST
This function now operates on folios associated with ptes instead of
pages.
This change is in preparation for the conversion of
queue_pages_required() to queue_folio_required() and migrate_page_add()
to migrate_folio_add().
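At its core, the per-pte loop after this patch becomes (condensed from
the hunks below):

	folio = vm_normal_folio(vma, addr, *pte);
	if (!folio || folio_is_zone_device(folio))
		continue;
	/* vm_normal_folio() filters out zero pages; skip reserved folios */
	if (folio_test_reserved(folio))
		continue;
	if (!queue_pages_required(&folio->page, qp))
		continue;

queue_pages_required() and migrate_page_add() still take a struct page
for now, so the folio's head page is passed via &folio->page until the
later patches in this series convert them.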
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@xxxxxxxxx>
---
mm/mempolicy.c | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 00fffa93adae..ae9d16124f45 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -491,19 +491,19 @@ static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
  * Scan through pages checking if pages follow certain conditions,
  * and move them to the pagelist if they do.
  *
- * queue_pages_pte_range() has three possible return values:
- * 0 - pages are placed on the right node or queued successfully, or
+ * queue_folios_pte_range() has three possible return values:
+ * 0 - folios are placed on the right node or queued successfully, or
  *        special page is met, i.e. zero page.
- * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
  *        specified.
- * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
+ * -EIO - only MPOL_MF_STRICT was specified and an existing folio was already
  *        on a node that does not follow the policy.
  */
-static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
+static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
 			unsigned long end, struct mm_walk *walk)
 {
 	struct vm_area_struct *vma = walk->vma;
-	struct page *page;
+	struct folio *folio;
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = qp->flags;
 	bool has_unmovable = false;
@@ -521,16 +521,16 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		if (!pte_present(*pte))
 			continue;
-		page = vm_normal_page(vma, addr, *pte);
-		if (!page || is_zone_device_page(page))
+		folio = vm_normal_folio(vma, addr, *pte);
+		if (!folio || folio_is_zone_device(folio))
 			continue;
 		/*
-		 * vm_normal_page() filters out zero pages, but there might
-		 * still be PageReserved pages to skip, perhaps in a VDSO.
+		 * vm_normal_folio() filters out zero pages, but there might
+		 * still be reserved folios to skip, perhaps in a VDSO.
 		 */
-		if (PageReserved(page))
+		if (folio_test_reserved(folio))
 			continue;
-		if (!queue_pages_required(page, qp))
+		if (!queue_pages_required(&folio->page, qp))
 			continue;
 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 			/* MPOL_MF_STRICT must be specified if we get here */
@@ -544,7 +544,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 			 * temporary off LRU pages in the range. Still
 			 * need migrate other LRU pages.
 			 */
-			if (migrate_page_add(page, qp->pagelist, flags))
+			if (migrate_page_add(&folio->page, qp->pagelist, flags))
 				has_unmovable = true;
 		} else
 			break;
@@ -703,7 +703,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
 static const struct mm_walk_ops queue_pages_walk_ops = {
 	.hugetlb_entry		= queue_pages_hugetlb,
-	.pmd_entry		= queue_pages_pte_range,
+	.pmd_entry		= queue_folios_pte_range,
 	.test_walk		= queue_pages_test_walk,
 };
--
2.38.1