[PATCH v3 04/15] userfaultfd: introduce mfill_get_vma() and mfill_put_vma()

From: Mike Rapoport

Date: Mon Mar 30 2026 - 06:19:13 EST


From: "Mike Rapoport (Microsoft)" <rppt@xxxxxxxxxx>

Split the code that finds, locks, and verifies the destination VMA out of
mfill_atomic() into a helper function.

This helper will be used later during the refactoring of
mfill_atomic_pte_copy().

Add a counterpart mfill_put_vma() helper that unlocks the VMA and releases
map_changing_lock.
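
With the pair in place, the fast path of mfill_atomic() boils down to the
pattern below (an illustrative sketch only; the retry loop, the hugetlb
path and error handling are abbreviated, and mfill_atomic_pte() stands in
for the per-PTE dispatch):

	err = mfill_get_vma(&state);	/* lock VMA, take map_changing_lock */
	if (err)
		goto out;

	while (state.src_addr < src_start + len)
		err = mfill_atomic_pte(...);	/* per-PTE operation */

	mfill_put_vma(&state);	/* unlock VMA, release map_changing_lock */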

[avagin@xxxxxxxxxx: fix lock leak in mfill_get_vma()]
Link: https://lkml.kernel.org/r/20260316173829.1126728-1-avagin@xxxxxxxxxx
Signed-off-by: Mike Rapoport (Microsoft) <rppt@xxxxxxxxxx>
Signed-off-by: Andrei Vagin <avagin@xxxxxxxxxx>
---
mm/userfaultfd.c | 126 ++++++++++++++++++++++++++++-------------------
1 file changed, 75 insertions(+), 51 deletions(-)

diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 291e5cfed431..c6a38db45343 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -157,6 +157,75 @@ static void uffd_mfill_unlock(struct vm_area_struct *vma)
}
#endif

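+/*
+ * Undo mfill_get_vma(): release ctx->map_changing_lock, unlock the
+ * destination VMA and clear state->vma. A no-op when no VMA is held,
+ * so it is safe to call on error paths.
+ */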
+static void mfill_put_vma(struct mfill_state *state)
+{
+ if (!state->vma)
+ return;
+
+ up_read(&state->ctx->map_changing_lock);
+ uffd_mfill_unlock(state->vma);
+ state->vma = NULL;
+}
+
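+/*
+ * Find, lock and verify the destination VMA for an mfill operation and
+ * take ctx->map_changing_lock for read. On success, state->vma is set
+ * and the caller must eventually drop both locks with mfill_put_vma().
+ * On failure, everything is unlocked and an error code is returned.
+ */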
+static int mfill_get_vma(struct mfill_state *state)
+{
+ struct userfaultfd_ctx *ctx = state->ctx;
+ uffd_flags_t flags = state->flags;
+ struct vm_area_struct *dst_vma;
+ int err;
+
+ /*
+ * Make sure the vma is not shared and that the dst range is
+ * both valid and fully within a single existing vma.
+ */
+ dst_vma = uffd_mfill_lock(ctx->mm, state->dst_start, state->len);
+ if (IS_ERR(dst_vma))
+ return PTR_ERR(dst_vma);
+
+ /*
+ * If memory mappings are changing because of a non-cooperative
+ * operation (e.g. mremap) running in parallel, bail out and
+ * request the user to retry later.
+ */
+ down_read(&ctx->map_changing_lock);
+ state->vma = dst_vma;
+ err = -EAGAIN;
+ if (atomic_read(&ctx->mmap_changing))
+ goto out_unlock;
+
+ err = -EINVAL;
+
+ /*
+ * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
+ * it will overwrite vm_ops, so vma_is_anonymous must return false.
+ */
+ if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
+ dst_vma->vm_flags & VM_SHARED))
+ goto out_unlock;
+
+ /*
+ * validate 'mode' now that we know the dst_vma: don't allow
+ * a wrprotect copy if the userfaultfd didn't register as WP.
+ */
+ if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
+ goto out_unlock;
+
+ if (is_vm_hugetlb_page(dst_vma))
+ return 0;
+
+ if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
+ goto out_unlock;
+ if (!vma_is_shmem(dst_vma) &&
+ uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
+ goto out_unlock;
+
+ return 0;
+
+out_unlock:
+ mfill_put_vma(state);
+ return err;
+}
+
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
pgd_t *pgd;
@@ -767,8 +836,6 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
.src_addr = src_start,
.dst_addr = dst_start,
};
- struct mm_struct *dst_mm = ctx->mm;
- struct vm_area_struct *dst_vma;
long copied = 0;
ssize_t err;

@@ -783,57 +850,17 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
VM_WARN_ON_ONCE(dst_start + len <= dst_start);

retry:
- /*
- * Make sure the vma is not shared, that the dst range is
- * both valid and fully within a single existing vma.
- */
- dst_vma = uffd_mfill_lock(dst_mm, dst_start, len);
- if (IS_ERR(dst_vma)) {
- err = PTR_ERR(dst_vma);
+ err = mfill_get_vma(&state);
+ if (err)
goto out;
- }
-
- /*
- * If memory mappings are changing because of non-cooperative
- * operation (e.g. mremap) running in parallel, bail out and
- * request the user to retry later
- */
- down_read(&ctx->map_changing_lock);
- err = -EAGAIN;
- if (atomic_read(&ctx->mmap_changing))
- goto out_unlock;
-
- err = -EINVAL;
- /*
- * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
- * it will overwrite vm_ops, so vma_is_anonymous must return false.
- */
- if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
- dst_vma->vm_flags & VM_SHARED))
- goto out_unlock;
-
- /*
- * validate 'mode' now that we know the dst_vma: don't allow
- * a wrprotect copy if the userfaultfd didn't register as WP.
- */
- if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
- goto out_unlock;

/*
* If this is a HUGETLB vma, pass off to appropriate routine
*/
- if (is_vm_hugetlb_page(dst_vma))
- return mfill_atomic_hugetlb(ctx, dst_vma, dst_start,
+ if (is_vm_hugetlb_page(state.vma))
+ return mfill_atomic_hugetlb(ctx, state.vma, dst_start,
src_start, len, flags);

- if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
- goto out_unlock;
- if (!vma_is_shmem(dst_vma) &&
- uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
- goto out_unlock;
-
- state.vma = dst_vma;
-
while (state.src_addr < src_start + len) {
VM_WARN_ON_ONCE(state.dst_addr >= dst_start + len);

@@ -852,8 +879,7 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
if (unlikely(err == -ENOENT)) {
void *kaddr;

- up_read(&ctx->map_changing_lock);
- uffd_mfill_unlock(state.vma);
+ mfill_put_vma(&state);
VM_WARN_ON_ONCE(!state.folio);

kaddr = kmap_local_folio(state.folio, 0);
@@ -882,9 +908,7 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
break;
}

-out_unlock:
- up_read(&ctx->map_changing_lock);
- uffd_mfill_unlock(state.vma);
+ mfill_put_vma(&state);
out:
if (state.folio)
folio_put(state.folio);
--
2.53.0