[PATCH 4/9] mm/memory: Batch set uffd-wp markers during zapping

From: Dev Jain

Date: Tue Mar 10 2026 - 03:36:48 EST


In preparation for the next patch, enable batch setting of uffd-wp ptes.

The code paths passing nr > 1 to zap_install_uffd_wp_if_needed() produce
that nr through either folio_pte_batch() or swap_pte_batch(), which
guarantees that all PTEs in the batch are consistent: they belong to the
same type of VMA (anonymous or non-anonymous, wp-armed or non-wp-armed),
and they are either all uffd-wp marked or all unmarked.

Note that we will have to use set_pte_at() in a loop instead of set_ptes()
since the latter cannot handle present->non-present conversion for
nr_pages > 1.

Convert documentation of install_uffd_wp_ptes_if_needed to kerneldoc
format.

No functional change is intended.

Signed-off-by: Dev Jain <dev.jain@xxxxxxx>
---
include/linux/mm_inline.h | 37 +++++++++++++++++++++++--------------
mm/memory.c | 20 +-------------------
mm/rmap.c | 2 +-
3 files changed, 25 insertions(+), 34 deletions(-)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index ad50688d89dba..d69b9abbdf2a7 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -560,21 +560,30 @@ static inline pte_marker copy_pte_marker(
return dstm;
}

-/*
- * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
- * replace a none pte. NOTE! This should only be called when *pte is already
+/**
+ * install_uffd_wp_ptes_if_needed - install uffd-wp marker on PTEs that map
+ * consecutive pages of the same large folio.
+ * @vma: The VMA the pages are mapped into.
+ * @addr: Address the first page of this batch is mapped at.
+ * @ptep: Page table pointer for the first entry of this batch.
+ * @pteval: Old value of the (already-cleared) entry pointed to by @ptep.
+ * @nr: Number of entries to install markers for (batch size).
+ *
+ * If the ptes were wr-protected by uffd-wp in any form, arm special ptes to
+ * replace none ptes. NOTE! This should only be called when *pte is already
* cleared so we will never accidentally replace something valuable. Meanwhile
* none pte also means we are not demoting the pte so tlb flushed is not needed.
* E.g., when pte cleared the caller should have taken care of the tlb flush.
*
- * Must be called with pgtable lock held so that no thread will see the none
- * pte, and if they see it, they'll fault and serialize at the pgtable lock.
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. The PTEs are all in the same PMD
+ * and the same VMA.
*
- * Returns true if an uffd-wp pte was installed, false otherwise.
+ * Return: true if uffd-wp ptes were installed, false otherwise.
*/
static inline bool
-pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
- pte_t *pte, pte_t pteval)
+install_uffd_wp_ptes_if_needed(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *pte, pte_t pteval, unsigned int nr)
{
bool arm_uffd_pte = false;

@@ -604,13 +613,13 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
if (unlikely(pte_swp_uffd_wp_any(pteval)))
arm_uffd_pte = true;

- if (unlikely(arm_uffd_pte)) {
- set_pte_at(vma->vm_mm, addr, pte,
- make_pte_marker(PTE_MARKER_UFFD_WP));
- return true;
- }
+ if (likely(!arm_uffd_pte))
+ return false;

- return false;
+ for (int i = 0; i < nr; ++i, ++pte, addr += PAGE_SIZE)
+ set_pte_at(vma->vm_mm, addr, pte, make_pte_marker(PTE_MARKER_UFFD_WP));
+
+ return true;
}

static inline bool vma_has_recency(const struct vm_area_struct *vma)
diff --git a/mm/memory.c b/mm/memory.c
index 38062f8e11656..768646c0b3b6a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1594,29 +1594,11 @@ zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
unsigned long addr, pte_t *pte, int nr,
struct zap_details *details, pte_t pteval)
{
- bool was_installed = false;
-
- if (!uffd_supports_wp_marker())
- return false;
-
- /* Zap on anonymous always means dropping everything */
- if (vma_is_anonymous(vma))
- return false;
-
if (zap_drop_markers(details))
return false;

- for (;;) {
- /* the PFN in the PTE is irrelevant. */
- if (pte_install_uffd_wp_if_needed(vma, addr, pte, pteval))
- was_installed = true;
- if (--nr == 0)
- break;
- pte++;
- addr += PAGE_SIZE;
- }
+ return install_uffd_wp_ptes_if_needed(vma, addr, pte, pteval, nr);

- return was_installed;
}

static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb,
diff --git a/mm/rmap.c b/mm/rmap.c
index a61978141ee3f..a7570cd037344 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2235,7 +2235,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
* we may want to replace a none pte with a marker pte if
* it's file-backed, so we don't lose the tracking info.
*/
- pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
+ install_uffd_wp_ptes_if_needed(vma, address, pvmw.pte, pteval, 1);

/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
--
2.34.1