[PATCH 07/28] mm/mmap: Introduce unlock_range() for code cleanup
From: Liam R. Howlett
Date: Thu Dec 10 2020 - 13:17:00 EST
Both __do_munmap() and exit_mmap() open-code a loop that walks the vma
list and munlocks any VM_LOCKED ranges. Factor that loop out into a
helper, unlock_range(), and call it from both places.

Note that exit_mmap() now also subtracts the unlocked pages from
mm->locked_vm, which the old loop did not do; presumably harmless,
since the mm is being torn down at that point.

Signed-off-by: Liam R. Howlett <Liam.Howlett@xxxxxxxxxx>
---
mm/mmap.c | 37 ++++++++++++++++++-------------------
1 file changed, 18 insertions(+), 19 deletions(-)
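
After this patch the two call sites reduce to the following (a sketch for
review convenience, not part of the commit; taken from the hunks below):

	/* __do_munmap(): unlock any mlock()ed ranges before detaching vmas */
	if (mm->locked_vm)
		unlock_range(vma, end);			/* stops at the first vma with vm_start >= end */

	/* exit_mmap(): the whole address space is being torn down */
	if (mm->locked_vm)
		unlock_range(mm->mmap, ULONG_MAX);	/* walks every vma on the list */
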
diff --git a/mm/mmap.c b/mm/mmap.c
index 5306a865e5973..6999c2bcf0496 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2993,6 +2993,20 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	return __split_vma(mm, vma, addr, new_below);
}

+static inline void unlock_range(struct vm_area_struct *start, unsigned long limit)
+{
+	struct mm_struct *mm = start->vm_mm;
+	struct vm_area_struct *tmp = start;
+
+	while (tmp && tmp->vm_start < limit) {
+		if (tmp->vm_flags & VM_LOCKED) {
+			mm->locked_vm -= vma_pages(tmp);
+			munlock_vma_pages_all(tmp);
+		}
+
+		tmp = tmp->vm_next;
+	}
+}
/* Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work. This now handles partial unmappings.
@@ -3081,17 +3095,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
	/*
	 * unlock any mlock()ed ranges before detaching vmas
	 */
-	if (mm->locked_vm) {
-		struct vm_area_struct *tmp = vma;
-		while (tmp && tmp->vm_start < end) {
-			if (tmp->vm_flags & VM_LOCKED) {
-				mm->locked_vm -= vma_pages(tmp);
-				munlock_vma_pages_all(tmp);
-			}
-
-			tmp = tmp->vm_next;
-		}
-	}
+	if (mm->locked_vm)
+		unlock_range(vma, end);

	/* Detach vmas from rbtree */
	if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
@@ -3394,14 +3399,8 @@ void exit_mmap(struct mm_struct *mm)
		mmap_write_unlock(mm);
	}

-	if (mm->locked_vm) {
-		vma = mm->mmap;
-		while (vma) {
-			if (vma->vm_flags & VM_LOCKED)
-				munlock_vma_pages_all(vma);
-			vma = vma->vm_next;
-		}
-	}
+	if (mm->locked_vm)
+		unlock_range(mm->mmap, ULONG_MAX);

	arch_exit_mmap(mm);

--
2.28.0