[PATCH v2] mm/memory: update stale locking comments for fault handlers

From: Aditya Sharma

Date: Sun Apr 05 2026 - 13:22:53 EST


Update the comments for wp_page_copy(), do_wp_page(), do_swap_page(),
do_anonymous_page(), __do_fault(), do_fault(), handle_pte_fault(),
__handle_mm_fault(), and handle_mm_fault() to concisely clarify that
they can be entered holding either the mmap_lock or the VMA lock,
and that the lock may be released upon returning VM_FAULT_RETRY.
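
The "either lock" convention is the same one the release_fault_lock()
helper encodes; as a rough sketch (paraphrasing the helper in
include/linux/mm.h, not quoted verbatim):

	static inline void release_fault_lock(struct vm_fault *vmf)
	{
		if (vmf->flags & FAULT_FLAG_VMA_LOCK)
			vma_end_read(vmf->vma);		/* per-VMA read lock */
		else
			mmap_read_unlock(vmf->vma->vm_mm);
	}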

Additionally, make the following corrections:
- In do_anonymous_page(), correct the outdated claim that the function
is entered with the PTE "mapped but not yet locked". Since
handle_pte_fault() unmaps the empty PTE before routing to
do_pte_missing(), the comment now correctly states it is entered
with the PTE unmapped and unlocked (see the sketch below).
- In __do_fault(), update the stale reference from __lock_page_or_retry()
to __folio_lock_or_retry().
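
As a condensed sketch of the routing described in the first point
(paraphrasing handle_pte_fault(), not quoted verbatim):

	if (pte_none(vmf->orig_pte)) {
		pte_unmap(vmf->pte);	/* empty PTE: unmap before routing */
		vmf->pte = NULL;
	}
	if (!vmf->pte)
		return do_pte_missing(vmf); /* -> do_anonymous_page() or do_fault() */

so do_anonymous_page() sees the PTE unmapped and unlocked on entry.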

Signed-off-by: Aditya Sharma <adi.sharma@xxxxxxxxxxx>
---
v2:
- Simplified the comment to concisely state "either the VMA lock or
the mmap_lock" instead of a verbose explanation (per David Hildenbrand).
- Expanded the scope to cover 8 other fault handlers in mm/memory.c
that suffered from the same stale mmap_lock comments.
- Fixed an additional historical inaccuracy in do_anonymous_page()
regarding the PTE mapping state on entry.
- Updated a stale reference in __do_fault() from __lock_page_or_retry()
to __folio_lock_or_retry().


mm/memory.c | 49 ++++++++++++++++++++++++++-----------------------
1 file changed, 26 insertions(+), 23 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index c65e82c86..2b407e3f9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3742,8 +3742,8 @@ vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf)
* Handle the case of a page which we actually need to copy to a new page,
* either due to COW or unsharing.
*
- * Called with mmap_lock locked and the old page referenced, but
- * without the ptl held.
+ * Called with either the VMA lock or the mmap_lock (FAULT_FLAG_VMA_LOCK
+ * tells you which) and the old page referenced, but without the ptl held.
*
* High level logic flow:
*
@@ -4142,9 +4142,9 @@ static bool wp_can_reuse_anon_folio(struct folio *folio,
* though the page will change only once the write actually happens. This
* avoids a few races, and potentially makes it more efficient.
*
- * We enter with non-exclusive mmap_lock (to exclude vma changes,
- * but allow concurrent faults), with pte both mapped and locked.
- * We return with mmap_lock still held, but pte unmapped and unlocked.
+ * We enter with either the VMA lock or the mmap_lock (FAULT_FLAG_VMA_LOCK
+ * tells you which), and pte both mapped and locked. We return with
+ * the same lock still held, but pte unmapped and unlocked.
*/
static vm_fault_t do_wp_page(struct vm_fault *vmf)
__releases(vmf->ptl)
@@ -4696,11 +4696,11 @@ static void check_swap_exclusive(struct folio *folio, swp_entry_t entry,
}

/*
- * We enter with non-exclusive mmap_lock (to exclude vma changes,
- * but allow concurrent faults), and pte mapped but not yet locked.
+ * We enter with either the VMA lock or the mmap_lock (FAULT_FLAG_VMA_LOCK
+ * tells you which), and pte mapped but not yet locked.
* We return with pte unmapped and unlocked.
*
- * We return with the mmap_lock locked or unlocked in the same cases
+ * We return with the lock held or released in the same cases
* as does filemap_fault().
*/
vm_fault_t do_swap_page(struct vm_fault *vmf)
@@ -5210,9 +5210,10 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
}

/*
- * We enter with non-exclusive mmap_lock (to exclude vma changes,
- * but allow concurrent faults), and pte mapped but not yet locked.
- * We return with mmap_lock still held, but pte unmapped and unlocked.
+ * We enter with either the VMA lock or the mmap_lock (FAULT_FLAG_VMA_LOCK
+ * tells you which), and pte unmapped and unlocked.
+ * We return with the lock still held, and pte still unmapped and unlocked.
+ * If VM_FAULT_RETRY is returned, the lock may have been released.
*/
static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
{
@@ -5330,9 +5331,10 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
}

/*
- * The mmap_lock must have been held on entry, and may have been
- * released depending on flags and vma->vm_ops->fault() return value.
- * See filemap_fault() and __lock_page_or_retry().
+ * Either the VMA lock or the mmap_lock must have been held on entry,
+ * and may have been released depending on flags and vma->vm_ops->fault()
+ * return value.
+ * See filemap_fault() and __folio_lock_or_retry().
*/
static vm_fault_t __do_fault(struct vm_fault *vmf)
{
@@ -5893,11 +5895,11 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
}

/*
- * We enter with non-exclusive mmap_lock (to exclude vma changes,
- * but allow concurrent faults).
- * The mmap_lock may have been released depending on flags and our
+ * We enter with either the VMA lock or the mmap_lock (FAULT_FLAG_VMA_LOCK
+ * tells you which).
+ * The lock may have been released depending on flags and our
* return value. See filemap_fault() and __folio_lock_or_retry().
- * If mmap_lock is released, vma may become invalid (for example
+ * If the lock is released, vma may become invalid (for example
* by other thread calling munmap()).
*/
static vm_fault_t do_fault(struct vm_fault *vmf)
@@ -6264,10 +6266,11 @@ static void fix_spurious_fault(struct vm_fault *vmf,
* with external mmu caches can use to update those (ie the Sparc or
* PowerPC hashed page tables that act as extended TLBs).
*
- * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
- * concurrent faults).
+ * On entry, we hold either the VMA lock or the mmap_lock
+ * (FAULT_FLAG_VMA_LOCK tells you which).
*
- * The mmap_lock may have been released depending on flags and our return value.
+ * The lock may have been released depending on flags and our
+ * return value.
* See filemap_fault() and __folio_lock_or_retry().
*/
static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
@@ -6349,7 +6352,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
/*
* On entry, we hold either the VMA lock or the mmap_lock
* (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in
- * the result, the mmap_lock is not held on exit. See filemap_fault()
+ * the result, the lock is not held on exit. See filemap_fault()
* and __folio_lock_or_retry().
*/
static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
@@ -6583,7 +6586,7 @@ static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
* By the time we get here, we already hold either the VMA lock or the
* mmap_lock (FAULT_FLAG_VMA_LOCK tells you which).
*
- * The mmap_lock may have been released depending on flags and our
+ * The lock may have been released depending on flags and our
* return value. See filemap_fault() and __folio_lock_or_retry().
*/
vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
--
2.34.1