Re: [PATCH v3 03/16] mm/mmap: Introduce vmi_complete_munmap_vmas()

From: Suren Baghdasaryan
Date: Wed Jul 10 2024 - 12:08:15 EST

On Thu, Jul 4, 2024 at 11:27 AM Liam R. Howlett <Liam.Howlett@xxxxxxxxxx> wrote:
>
> Extract the operations that must be completed once the vma maple tree
> has been updated during a munmap() operation. Extracting these makes
> the later patch in the series easier to understand.
>
> Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
> ---
>  mm/mmap.c | 79 +++++++++++++++++++++++++++++++++++++------------------
>  1 file changed, 53 insertions(+), 26 deletions(-)
>
> diff --git a/mm/mmap.c b/mm/mmap.c
> index d572e1ff8255..411798f46932 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -2605,6 +2605,56 @@ static inline void abort_munmap_vmas(struct ma_state *mas_detach)
>  	__mt_destroy(mas_detach->tree);
>  }
>
> +/*
> + * vmi_complete_munmap_vmas() - Finish the munmap() operation
> + * @vmi: The vma iterator

You are missing the @vma and @mm documentation. With that fixed:

Reviewed-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
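
Something like the @vma and @mm lines already in do_vmi_align_munmap()'s
comment would do, e.g. (my wording, adjust as you like):

 * @vma: The starting vm_area_struct
 * @mm: The mm_struct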

> + * @start: The start address
> + * @end: The end address
> + * @unlock: Unlock the mm or not
> + * @mas_detach: The maple state of the detached vma maple tree
> + * @locked_vm: The locked_vm count in the detached vmas
> + *
> + * This function updates the mm_struct, unmaps the region, frees the
> + * resources used for the munmap(), and may downgrade the mmap lock if
> + * requested. It handles everything that needs to be done once the vma
> + * maple tree has been updated.
> + */
> +static void
> +vmi_complete_munmap_vmas(struct vma_iterator *vmi, struct vm_area_struct *vma,
> +		struct mm_struct *mm, unsigned long start,
> +		unsigned long end, bool unlock, struct ma_state *mas_detach,
> +		unsigned long locked_vm)
> +{
> +	struct vm_area_struct *prev, *next;
> +	int count;
> +
> +	count = mas_detach->index + 1;
> +	mm->map_count -= count;
> +	mm->locked_vm -= locked_vm;
> +	if (unlock)
> +		mmap_write_downgrade(mm);
> +
> +	prev = vma_iter_prev_range(vmi);
> +	next = vma_next(vmi);
> +	if (next)
> +		vma_iter_prev_range(vmi);
> +
> +	/*
> +	 * We can free page tables without write-locking mmap_lock because VMAs
> +	 * were isolated before we downgraded mmap_lock.
> +	 */
> +	mas_set(mas_detach, 1);
> +	unmap_region(mm, mas_detach, vma, prev, next, start, end, count,
> +		     !unlock);
> +	/* Statistics and freeing VMAs */
> +	mas_set(mas_detach, 0);
> +	remove_mt(mm, mas_detach);
> +	validate_mm(mm);
> +	if (unlock)
> +		mmap_read_unlock(mm);
> +
> +	__mt_destroy(mas_detach->tree);
> +}
> +
>  /*
>   * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
>   * @vmi: The vma iterator
> @@ -2624,7 +2674,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
>  		    struct mm_struct *mm, unsigned long start,
>  		    unsigned long end, struct list_head *uf, bool unlock)
>  {
> -	struct vm_area_struct *prev, *next = NULL;
> +	struct vm_area_struct *next = NULL;
>  	struct maple_tree mt_detach;
>  	int count = 0;
>  	int error = -ENOMEM;
> @@ -2728,31 +2778,8 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
>  		goto clear_tree_failed;
>
>  	/* Point of no return */
> -	mm->locked_vm -= locked_vm;
> -	mm->map_count -= count;
> -	if (unlock)
> -		mmap_write_downgrade(mm);
> -
> -	prev = vma_iter_prev_range(vmi);
> -	next = vma_next(vmi);
> -	if (next)
> -		vma_iter_prev_range(vmi);
> -
> -	/*
> -	 * We can free page tables without write-locking mmap_lock because VMAs
> -	 * were isolated before we downgraded mmap_lock.
> -	 */
> -	mas_set(&mas_detach, 1);
> -	unmap_region(mm, &mas_detach, vma, prev, next, start, end, count,
> -		     !unlock);
> -	/* Statistics and freeing VMAs */
> -	mas_set(&mas_detach, 0);
> -	remove_mt(mm, &mas_detach);
> -	validate_mm(mm);
> -	if (unlock)
> -		mmap_read_unlock(mm);
> -
> -	__mt_destroy(&mt_detach);
> +	vmi_complete_munmap_vmas(vmi, vma, mm, start, end, unlock, &mas_detach,
> +				 locked_vm);
>  	return 0;
>
>  clear_tree_failed:
> --
> 2.43.0
>