[RFC 1/2] mm/mthp: add pte_range_none_or_zeropfn() helper
From: Lance Yang
Date: Sun Jan 19 2025 - 20:23:05 EST
Add a pte_range_none_or_zeropfn() helper that returns true when every PTE
in a range is either none or maps the shared zero page, and turn
pte_range_none() into a thin wrapper around the common __pte_range_check().

This is in preparation for relaxing the mTHP PTE mapping restriction on
the demand zero page, so that we get more opportunities to take advantage
of mTHP.
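
As an illustration only (this sketch is not taken from the follow-up
patch), the helper could replace pte_range_none() in alloc_anon_folio()'s
existing order-selection loop, so that PTEs which already map the zero
page no longer disqualify an order:

	/* Sketch: how a follow-up might probe for a usable order. */
	bool any_zeropfn;

	order = highest_order(orders);
	while (orders) {
		addr = ALIGN_DOWN(vmf->address, PAGE_SIZE << order);
		if (pte_range_none_or_zeropfn(pte + pte_index(addr),
					      1 << order, &any_zeropfn))
			break;
		order = next_order(&orders, order);
	}

The any_zeropfn output lets the caller distinguish a fully empty range
from one where some entries already map the zero page and therefore need
to be replaced rather than simply populated.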
Signed-off-by: Mingzhe Yang <mingzhe.yang@xxxxxx>
Signed-off-by: Lance Yang <ioworker0@xxxxxxxxx>
---
mm/memory.c | 43 ++++++++++++++++++++++++++++++++++++++++---
1 file changed, 40 insertions(+), 3 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 2a20e3810534..4e148309b3e0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4724,18 +4724,55 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	return ret;
 }
 
-static bool pte_range_none(pte_t *pte, int nr_pages)
+/* Flags for __pte_range_check(). */
+typedef int __bitwise prc_t;
+
+/* Check if PTEs are none (pte_none()). */
+#define PRC_CHECK_NONE		((__force prc_t)BIT(0))
+
+/* Check if PTEs are mapped to the zero page. */
+#define PRC_CHECK_ZEROPFN	((__force prc_t)BIT(1))
+
+static bool __pte_range_check(pte_t *pte, int nr_pages, prc_t flags,
+			      bool *any_zeropfn)
 {
 	int i;
+	pte_t ptent;
+
+	if (any_zeropfn)
+		*any_zeropfn = false;
 
 	for (i = 0; i < nr_pages; i++) {
-		if (!pte_none(ptep_get_lockless(pte + i)))
-			return false;
+		ptent = ptep_get_lockless(pte + i);
+
+		if ((flags & PRC_CHECK_NONE) && pte_none(ptent))
+			continue;
+
+		if ((flags & PRC_CHECK_ZEROPFN) && pte_present(ptent) &&
+		    is_zero_pfn(pte_pfn(ptent))) {
+			if (any_zeropfn)
+				*any_zeropfn = true;
+			continue;
+		}
+
+		return false;
 	}
 
 	return true;
 }
 
+static inline bool pte_range_none(pte_t *pte, int nr_pages)
+{
+	return __pte_range_check(pte, nr_pages, PRC_CHECK_NONE, NULL);
+}
+
+static inline bool pte_range_none_or_zeropfn(pte_t *pte, int nr_pages,
+					      bool *any_zeropfn)
+{
+	return __pte_range_check(pte, nr_pages,
+			PRC_CHECK_NONE | PRC_CHECK_ZEROPFN, any_zeropfn);
+}
+
 static struct folio *alloc_anon_folio(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
--
2.45.2