[RFC v9 PATCH 4/4] mm: unmap VM_PFNMAP mappings with optimized path
From: Yang Shi
Date: Tue Sep 11 2018 - 16:58:42 EST
When unmapping VM_PFNMAP mappings, vm_flags may need to be updated. Since
the vmas have already been detached at that point, it sounds safe to update
vm_flags with just the read mmap_sem held, so such mappings can be handled
by the optimized path as well instead of falling back to regular do_munmap().
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Yang Shi <yang.shi@xxxxxxxxxxxxxxxxx>
---
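Note (for reference, not for the changelog): the vm_flags write that the
removed comment worries about comes from the zap path itself. As far as I
can tell, unmap_single_vma() calls untrack_pfn() for VM_PFNMAP vmas, and at
least on x86 with PAT that ends up clearing VM_PAT from vma->vm_flags. A
rough, paraphrased sketch of that interaction (not a verbatim excerpt):

	/* mm/memory.c, unmap_single_vma(), roughly: */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn(vma, 0, 0);

	/* arch/x86/mm/pat.c, untrack_pfn(), roughly: */
	if (vma)
		vma->vm_flags &= ~VM_PAT;

Because the vma has already been detached from the mm before the zap runs,
no other mmap_sem reader can reach it anymore, which is why doing this
flag update under read mmap_sem looks safe and the special case below can go.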
mm/mmap.c | 15 +--------------
1 file changed, 1 insertion(+), 14 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 086f8b5..0b6b231 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2778,7 +2778,7 @@ static int do_munmap_zap_rlock(struct mm_struct *mm, unsigned long start,
 			       size_t len, struct list_head *uf)
 {
 	unsigned long end;
-	struct vm_area_struct *start_vma, *prev, *vma;
+	struct vm_area_struct *start_vma, *prev;
 	int ret = 0;
 
 	if (!addr_ok(start, len))
@@ -2811,16 +2811,6 @@ static int do_munmap_zap_rlock(struct mm_struct *mm, unsigned long start,
 		goto out;
 	}
 
-	/*
-	 * Unmapping vmas, which have VM_PFNMAP
-	 * need get done with write mmap_sem held since they may update
-	 * vm_flags. Deal with such mappings with regular do_munmap() call.
-	 */
-	for (vma = start_vma; vma && vma->vm_start < end; vma = vma->vm_next) {
-		if (vma->vm_flags & VM_PFNMAP)
-			goto regular_path;
-	}
-
 	/* Handle mlocked vmas */
 	if (mm->locked_vm)
 		munlock_vmas(start_vma, end);
@@ -2844,9 +2834,6 @@ static int do_munmap_zap_rlock(struct mm_struct *mm, unsigned long start,
 
 	return 0;
 
-regular_path:
-	ret = do_munmap(mm, start, len, uf);
-
 out:
 	up_write(&mm->mmap_sem);
 	return ret;
--
1.8.3.1