[PATCH v3 2/2] mm/mprotect: special-case small folios when applying write permissions
From: Pedro Falcato
Date: Thu Apr 02 2026 - 10:24:40 EST
The common order-0 case is important enough to warrant its own branch,
which also avoids the hairy batched-PTE loop logic that the CPU does
not seem to handle particularly well.
While at it, strategically add __always_inline to encourage the compiler
to inline the batch PTE logic and resolve constant branches at compile
time.
Suggested-by: David Hildenbrand (Arm) <david@xxxxxxxxxx>
Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@xxxxxxxxxx>
Tested-by: Luke Yang <luyang@xxxxxxxxxx>
Signed-off-by: Pedro Falcato <pfalcato@xxxxxxx>
---
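Note for reviewers: the win here comes from the literal nr_ptes == 1
being visible through the whole __always_inline call chain, so the
batch loops collapse into straight-line single-PTE code. Below is a
minimal user-space sketch of that pattern; the names (commit_batch,
change_range) are hypothetical and purely illustrative, not the kernel
helpers themselves. Compile with e.g. gcc -O2.

#include <stdio.h>

#define __always_inline inline __attribute__((__always_inline__))

/* Stand-in for a batch PTE helper: update nr consecutive entries. */
static __always_inline void commit_batch(unsigned long *ptes, int nr)
{
        for (int i = 0; i < nr; i++)
                ptes[i] |= 1UL;         /* pretend to set a permission bit */
}

static void change_range(unsigned long *ptes, int nr)
{
        if (__builtin_expect(nr == 1, 1))
                commit_batch(ptes, 1);  /* nr is a compile-time constant here */
        else
                commit_batch(ptes, nr); /* general batched path keeps the loop */
}

int main(void)
{
        unsigned long ptes[4] = { 0, 0, 0, 0 };

        change_range(ptes, 1);          /* common order-0 case */
        change_range(&ptes[1], 3);      /* batched case */
        printf("%lu %lu %lu %lu\n", ptes[0], ptes[1], ptes[2], ptes[3]);
        return 0;
}

At -O2 the nr == 1 arm folds to a single store with no loop setup,
mirroring what the likely(nr_ptes == 1) split in change_pte_range()
below is intended to achieve.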
mm/mprotect.c | 91 ++++++++++++++++++++++++++++++++-------------------
1 file changed, 57 insertions(+), 34 deletions(-)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 5929ce792c7b..98da856e3a52 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -117,9 +117,9 @@ static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep,
}
/* Set nr_ptes number of ptes, starting from idx */
-static void prot_commit_flush_ptes(struct vm_area_struct *vma, unsigned long addr,
- pte_t *ptep, pte_t oldpte, pte_t ptent, int nr_ptes,
- int idx, bool set_write, struct mmu_gather *tlb)
+static __always_inline void prot_commit_flush_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, pte_t oldpte, pte_t ptent,
+ int nr_ptes, int idx, bool set_write, struct mmu_gather *tlb)
{
/*
* Advance the position in the batch by idx; note that if idx > 0,
@@ -143,7 +143,7 @@ static void prot_commit_flush_ptes(struct vm_area_struct *vma, unsigned long add
* !PageAnonExclusive() pages, starting from start_idx. Caller must enforce
* that the ptes point to consecutive pages of the same anon large folio.
*/
-static int page_anon_exclusive_sub_batch(int start_idx, int max_len,
+static __always_inline int page_anon_exclusive_sub_batch(int start_idx, int max_len,
struct page *first_page, bool expected_anon_exclusive)
{
int idx;
@@ -169,7 +169,7 @@ static int page_anon_exclusive_sub_batch(int start_idx, int max_len,
* pte of the batch. Therefore, we must individually check all pages and
* retrieve sub-batches.
*/
-static void commit_anon_folio_batch(struct vm_area_struct *vma,
+static __always_inline void commit_anon_folio_batch(struct vm_area_struct *vma,
struct folio *folio, struct page *first_page, unsigned long addr, pte_t *ptep,
pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)
{
@@ -188,7 +188,7 @@ static void commit_anon_folio_batch(struct vm_area_struct *vma,
}
}
-static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
+static __always_inline void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma,
struct folio *folio, struct page *page, unsigned long addr, pte_t *ptep,
pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb)
{
@@ -277,6 +277,45 @@ static long change_softleaf_pte(struct vm_area_struct *vma,
return 0;
}
+static __always_inline void change_present_ptes(struct mmu_gather *tlb,
+ struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+ int nr_ptes, unsigned long end, pgprot_t newprot,
+ struct folio *folio, struct page *page, unsigned long cp_flags)
+{
+ const bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
+ const bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
+ pte_t ptent, oldpte;
+
+ oldpte = modify_prot_start_ptes(vma, addr, ptep, nr_ptes);
+ ptent = pte_modify(oldpte, newprot);
+
+ if (uffd_wp)
+ ptent = pte_mkuffd_wp(ptent);
+ else if (uffd_wp_resolve)
+ ptent = pte_clear_uffd_wp(ptent);
+
+ /*
+ * In some writable, shared mappings, we might want
+ * to catch actual write access -- see
+ * vma_wants_writenotify().
+ *
+ * In all writable, private mappings, we have to
+ * properly handle COW.
+ *
+ * In both cases, we can sometimes still change PTEs
+ * writable and avoid the write-fault handler, for
+ * example, if a PTE is already dirty and no other
+ * COW or special handling is required.
+ */
+ if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
+ !pte_write(ptent))
+ set_write_prot_commit_flush_ptes(vma, folio, page,
+ addr, ptep, oldpte, ptent, nr_ptes, tlb);
+ else
+ prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent,
+ nr_ptes, /* idx = */ 0, /* set_write = */ false, tlb);
+}
+
static long change_pte_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
unsigned long end, pgprot_t newprot, unsigned long cp_flags)
@@ -287,7 +326,6 @@ static long change_pte_range(struct mmu_gather *tlb,
bool is_private_single_threaded;
bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
- bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
int nr_ptes;
tlb_change_page_size(tlb, PAGE_SIZE);
@@ -308,7 +346,6 @@ static long change_pte_range(struct mmu_gather *tlb,
int max_nr_ptes = (end - addr) >> PAGE_SHIFT;
struct folio *folio = NULL;
struct page *page;
- pte_t ptent;
/* Already in the desired state. */
if (prot_numa && pte_protnone(oldpte))
@@ -334,34 +371,20 @@ static long change_pte_range(struct mmu_gather *tlb,
nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags);
- oldpte = modify_prot_start_ptes(vma, addr, pte, nr_ptes);
- ptent = pte_modify(oldpte, newprot);
-
- if (uffd_wp)
- ptent = pte_mkuffd_wp(ptent);
- else if (uffd_wp_resolve)
- ptent = pte_clear_uffd_wp(ptent);
-
/*
- * In some writable, shared mappings, we might want
- * to catch actual write access -- see
- * vma_wants_writenotify().
- *
- * In all writable, private mappings, we have to
- * properly handle COW.
- *
- * In both cases, we can sometimes still change PTEs
- * writable and avoid the write-fault handler, for
- * example, if a PTE is already dirty and no other
- * COW or special handling is required.
+ * Optimize for the small-folio common case by
+ * special-casing it here. Compiler constant propagation
+ * plus copious amounts of __always_inline does wonders.
*/
- if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) &&
- !pte_write(ptent))
- set_write_prot_commit_flush_ptes(vma, folio, page,
- addr, pte, oldpte, ptent, nr_ptes, tlb);
- else
- prot_commit_flush_ptes(vma, addr, pte, oldpte, ptent,
- nr_ptes, /* idx = */ 0, /* set_write = */ false, tlb);
+ if (likely(nr_ptes == 1)) {
+ change_present_ptes(tlb, vma, addr, pte, 1,
+ end, newprot, folio, page, cp_flags);
+ } else {
+ change_present_ptes(tlb, vma, addr, pte,
+ nr_ptes, end, newprot, folio, page,
+ cp_flags);
+ }
+
pages += nr_ptes;
} else if (pte_none(oldpte)) {
/*
--
2.53.0