Re: [PATCH] mm/pagewalk.c: walk_page_range should avoid VM_PFNMAP areas
From: David Rientjes
Date: Wed May 01 2013 - 11:47:20 EST
On Wed, 1 May 2013, Cliff Wickman wrote:
> Index: linux/mm/pagewalk.c
> ===================================================================
> --- linux.orig/mm/pagewalk.c
> +++ linux/mm/pagewalk.c
> @@ -127,22 +127,6 @@ static int walk_hugetlb_range(struct vm_
> return 0;
> }
>
> -static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
> -{
> - struct vm_area_struct *vma;
> -
> - /* We don't need vma lookup at all. */
> - if (!walk->hugetlb_entry)
> - return NULL;
> -
> - VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
> - vma = find_vma(walk->mm, addr);
> - if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
> - return vma;
> -
> - return NULL;
> -}
> -
> #else /* CONFIG_HUGETLB_PAGE */
> static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
> {
> @@ -200,28 +184,46 @@ int walk_page_range(unsigned long addr,
>
> pgd = pgd_offset(walk->mm, addr);
> do {
> - struct vm_area_struct *vma;
> + struct vm_area_struct *vma = NULL;
>
> next = pgd_addr_end(addr, end);
>
> /*
> - * handle hugetlb vma individually because pagetable walk for
> - * the hugetlb page is dependent on the architecture and
> - * we can't handled it in the same manner as non-huge pages.
> + * Check any special vma's within this range.
> */
> - vma = hugetlb_vma(addr, walk);
> + VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
I think this should be moved out of the iteration. It was already run on
every pass even before your patch (inside hugetlb_vma()), but mmap_sem
can't be dropped mid-walk, so rechecking it each time around the loop is
pointless.
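Something like this, just a sketch of what I mean (untested):

	/*
	 * mmap_sem can't be dropped while we iterate, so assert once up
	 * front rather than on every pass through the loop.
	 */
	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));

	pgd = pgd_offset(walk->mm, addr);
	do {
		struct vm_area_struct *vma;

		next = pgd_addr_end(addr, end);
		vma = find_vma(walk->mm, addr);
		...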
> + vma = find_vma(walk->mm, addr);
> if (vma) {
> - if (vma->vm_end < next)
> + /*
> + * There are no page structures backing a VM_PFNMAP
> + * range, so allow no split_huge_page_pmd().
> + */
> + if (vma->vm_flags & VM_PFNMAP) {
> next = vma->vm_end;
> + pgd = pgd_offset(walk->mm, next);
> + continue;
> + }
What if end < vma->vm_end? Then next is pushed past the end the caller
asked for, and since the loop only terminates when addr reaches end, the
walk runs off the requested range.
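I'd clamp it, something like (again just a sketch, untested):

	if (vma->vm_flags & VM_PFNMAP) {
		/* Skip the pfnmap, but never walk past the caller's end. */
		next = min(vma->vm_end, end);
		pgd = pgd_offset(walk->mm, next);
		continue;
	}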
> /*
> - * Hugepage is very tightly coupled with vma, so
> - * walk through hugetlb entries within a given vma.
> + * Handle hugetlb vma individually because pagetable
> + * walk for the hugetlb page is dependent on the
> + * architecture and we can't handle it in the same
> + * manner as non-huge pages.
> */
> - err = walk_hugetlb_range(vma, addr, next, walk);
> - if (err)
> - break;
> - pgd = pgd_offset(walk->mm, next);
> - continue;
> + if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
> + is_vm_hugetlb_page(vma)) {
> + if (vma->vm_end < next)
> + next = vma->vm_end;
> + /*
> + * Hugepage is very tightly coupled with vma,
> + * so walk through hugetlb entries within a
> + * given vma.
> + */
> + err = walk_hugetlb_range(vma, addr, next, walk);
> + if (err)
> + break;
> + pgd = pgd_offset(walk->mm, next);
> + continue;
> + }
> }
>
> if (pgd_none_or_clear_bad(pgd)) {