[PATCH v2 37/44] mm/mmap: Use vma_prepare() and vma_complete() in vma_expand()
From: Liam Howlett
Date: Thu Jan 05 2023 - 14:22:03 EST
From: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx>
Use the new vma_prepare() and vma_complete() locking helpers in
vma_expand(). This reduces code duplication.
At the same time, change VM_BUG_ON() to VM_WARN_ON().
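For reference, the open-coded locking sequence that vma_expand() used to
carry collapses into a single vma_prepare()/vma_complete() pair. A
condensed sketch of the new flow (taken from the hunk below; maple tree
preallocation, the anon_vma clone for @next, THP adjustment and error
handling are omitted):

        struct vma_prepare vp;

        memset(&vp, 0, sizeof(vp));
        vp.vma = vma;                   /* the VMA being expanded */
        vp.anon_vma = vma->anon_vma;    /* anon_vma to write-lock, if any */
        vp.file = vma->vm_file;         /* backing file, for i_mmap locking */
        if (vp.file)
                vp.mapping = vp.file->f_mapping;
        /* vp.remove is set to @next when expanding over it */

        vma_prepare(&vp);               /* take locks, pre-update rmap trees */
        vma->vm_start = start;
        vma->vm_end = end;
        vma->vm_pgoff = pgoff;
        vma_iter_store(vmi, vma);       /* write the new range to the maple tree */
        vma_complete(&vp, vmi, vma->vm_mm);     /* unlock and finish removals */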
Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
---
mm/mmap.c | 189 +++++++++++++++++++++---------------------------------
1 file changed, 73 insertions(+), 116 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 3cf08aaee17d..9546d5811ca9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -519,122 +519,6 @@ static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
return 0;
}
-/*
- * vma_expand - Expand an existing VMA
- *
- * @mas: The maple state
- * @vma: The vma to expand
- * @start: The start of the vma
- * @end: The exclusive end of the vma
- * @pgoff: The page offset of vma
- * @next: The current of next vma.
- *
- * Expand @vma to @start and @end. Can expand off the start and end. Will
- * expand over @next if it's different from @vma and @end == @next->vm_end.
- * Checking if the @vma can expand and merge with @next needs to be handled by
- * the caller.
- *
- * Returns: 0 on success
- */
-inline int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
- unsigned long start, unsigned long end, pgoff_t pgoff,
- struct vm_area_struct *next)
-{
- struct mm_struct *mm = vma->vm_mm;
- struct address_space *mapping = NULL;
- struct rb_root_cached *root = NULL;
- struct anon_vma *anon_vma = vma->anon_vma;
- struct file *file = vma->vm_file;
- bool remove_next = false;
-
- if (next && (vma != next) && (end == next->vm_end)) {
- remove_next = true;
- if (next->anon_vma && !vma->anon_vma) {
- int error;
-
- anon_vma = next->anon_vma;
- vma->anon_vma = anon_vma;
- error = anon_vma_clone(vma, next);
- if (error)
- return error;
- }
- }
-
- /* Not merging but overwriting any part of next is not handled. */
- VM_BUG_ON(next && !remove_next && next != vma && end > next->vm_start);
- /* Only handles expanding */
- VM_BUG_ON(vma->vm_start < start || vma->vm_end > end);
-
- if (vma_iter_prealloc(vmi, vma))
- goto nomem;
-
- vma_adjust_trans_huge(vma, start, end, 0);
-
- if (file) {
- mapping = file->f_mapping;
- root = &mapping->i_mmap;
- uprobe_munmap(vma, vma->vm_start, vma->vm_end);
- i_mmap_lock_write(mapping);
- }
-
- if (anon_vma) {
- anon_vma_lock_write(anon_vma);
- anon_vma_interval_tree_pre_update_vma(vma);
- }
-
- if (file) {
- flush_dcache_mmap_lock(mapping);
- vma_interval_tree_remove(vma, root);
- }
-
- /* VMA iterator points to previous, so set to start if necessary */
- if (vma_iter_addr(vmi) != start)
- vma_iter_set(vmi, start);
-
- vma->vm_start = start;
- vma->vm_end = end;
- vma->vm_pgoff = pgoff;
- vma_iter_store(vmi, vma);
-
- if (file) {
- vma_interval_tree_insert(vma, root);
- flush_dcache_mmap_unlock(mapping);
- }
-
- /* Expanding over the next vma */
- if (remove_next && file) {
- __remove_shared_vm_struct(next, file, mapping);
- }
-
- if (anon_vma) {
- anon_vma_interval_tree_post_update_vma(vma);
- anon_vma_unlock_write(anon_vma);
- }
-
- if (file) {
- i_mmap_unlock_write(mapping);
- uprobe_mmap(vma);
- }
-
- if (remove_next) {
- if (file) {
- uprobe_munmap(next, next->vm_start, next->vm_end);
- fput(file);
- }
- if (next->anon_vma)
- anon_vma_merge(vma, next);
- mm->map_count--;
- mpol_put(vma_policy(next));
- vm_area_free(next);
- }
-
- validate_mm(mm);
- return 0;
-
-nomem:
- return -ENOMEM;
-}
-
/*
* vma_prepare() - Helper function for handling locking VMAs prior to altering
* @vp: The initialized vma_prepare struct
@@ -756,6 +640,79 @@ static inline void vma_complete(struct vma_prepare *vp,
uprobe_mmap(vp->insert);
}
+/*
+ * vma_expand - Expand an existing VMA
+ *
+ * @vmi: The vma iterator
+ * @vma: The vma to expand
+ * @start: The start of the vma
+ * @end: The exclusive end of the vma
+ * @pgoff: The page offset of vma
+ * @next: The vma after @vma, if any.
+ *
+ * Expand @vma to @start and @end. Can expand off the start and end. Will
+ * expand over @next if it's different from @vma and @end == @next->vm_end.
+ * Checking if the @vma can expand and merge with @next needs to be handled by
+ * the caller.
+ *
+ * Returns: 0 on success
+ */
+inline int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff,
+ struct vm_area_struct *next)
+{
+ struct vma_prepare vp;
+
+ memset(&vp, 0, sizeof(vp));
+ vp.vma = vma;
+ vp.anon_vma = vma->anon_vma;
+ if (next && (vma != next) && (end == next->vm_end)) {
+ vp.remove = next;
+ if (next->anon_vma && !vma->anon_vma) {
+ int error;
+
+ vp.anon_vma = next->anon_vma;
+ vma->anon_vma = next->anon_vma;
+ error = anon_vma_clone(vma, next);
+ if (error)
+ return error;
+ }
+ }
+
+ /* Not merging but overwriting any part of next is not handled. */
+ VM_WARN_ON(next && !vp.remove &&
+ next != vma && end > next->vm_start);
+ /* Only handles expanding */
+ VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);
+
+ if (vma_iter_prealloc(vmi, vma))
+ goto nomem;
+
+ vma_adjust_trans_huge(vma, start, end, 0);
+
+ vp.file = vma->vm_file;
+ if (vp.file)
+ vp.mapping = vp.file->f_mapping;
+
+ /* VMA iterator points to previous, so set to start if necessary */
+ if (vma_iter_addr(vmi) != start)
+ vma_iter_set(vmi, start);
+
+ vma_prepare(&vp);
+ vma->vm_start = start;
+ vma->vm_end = end;
+ vma->vm_pgoff = pgoff;
+ /* Note: mas must be pointing to the expanding VMA */
+ vma_iter_store(vmi, vma);
+
+ vma_complete(&vp, vmi, vma->vm_mm);
+ validate_mm(vma->vm_mm);
+ return 0;
+
+nomem:
+ return -ENOMEM;
+}
/*
* We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
* is already present in an i_mmap tree without adjusting the tree.
--
2.35.1