Re: [PATCH 17/19] mlock, thp: HACK: split all pages in VM_LOCKED vma
From: Naoya Horiguchi
Date: Wed Nov 19 2014 - 04:04:32 EST
On Wed, Nov 05, 2014 at 04:49:52PM +0200, Kirill A. Shutemov wrote:
> We don't yet handle mlocked pages properly with the new THP refcounting.
> For now we split all pages in the VMA on mlock and disallow khugepaged
> from collapsing pages in the VMA. If the split fails on mlock() we fail
> the syscall with -EBUSY.
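Just to spell out the userspace-visible effect of that last sentence:
mlock() on such a VMA fails with EBUSY when the split fails. A minimal
sketch (not part of the patch, assuming a THP-backed anonymous mapping):

	#include <errno.h>
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 2UL << 20;	/* one 2MB, THP-sized region */
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/*
		 * With this patch, mlock() first splits any THP in the
		 * range; if the split fails, the syscall returns -1 with
		 * errno == EBUSY instead of mlocking the range.
		 */
		if (mlock(p, len) == -1 && errno == EBUSY)
			fprintf(stderr, "mlock: THP split failed (EBUSY)\n");

		munmap(p, len);
		return 0;
	}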
> ---
...
> @@ -542,6 +530,60 @@ next:
> }
> }
>
> +static int thp_split(pmd_t *pmd, unsigned long addr, unsigned long end,
> + struct mm_walk *walk)
> +{
> + spinlock_t *ptl;
> + struct page *page = NULL;
> + pte_t *pte;
> + int err = 0;
> +
> +retry:
> + if (pmd_none(*pmd))
> + return 0;
> + if (pmd_trans_huge(*pmd)) {
> + if (is_huge_zero_pmd(*pmd)) {
> + split_huge_pmd(walk->vma, pmd, addr);
> + return 0;
> + }
> + ptl = pmd_lock(walk->mm, pmd);
> + if (!pmd_trans_huge(*pmd)) {
> + spin_unlock(ptl);
> + goto retry;
> + }
> + page = pmd_page(*pmd);
> + VM_BUG_ON_PAGE(!PageHead(page), page);
> + get_page(page);
> + spin_unlock(ptl);
> + err = split_huge_page(page);
> + put_page(page);
> + return err;
> + }
> + pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
> + do {
> + if (!pte_present(*pte))
> + continue;
> + page = vm_normal_page(walk->vma, addr, *pte);
> + if (!page)
> + continue;
> + if (PageTransCompound(page)) {
> + page = compound_head(page);
> + get_page(page);
> + spin_unlock(ptl);
> + err = split_huge_page(page);
> + spin_lock(ptl);
> + put_page(page);
> + if (!err) {
> + VM_BUG_ON_PAGE(compound_mapcount(page), page);
> + VM_BUG_ON_PAGE(PageTransCompound(page), page);
If split_huge_page() succeeded, we don't have to continue the iteration,
so can we break out of the loop here?
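Something like this (untested, only meant to illustrate the question;
the "stop walking" comment is my reading, not from the patch):

		if (PageTransCompound(page)) {
			page = compound_head(page);
			get_page(page);
			spin_unlock(ptl);
			err = split_huge_page(page);
			spin_lock(ptl);
			put_page(page);
			if (err)
				break;
			VM_BUG_ON_PAGE(compound_mapcount(page), page);
			VM_BUG_ON_PAGE(PageTransCompound(page), page);
			/* split succeeded, so stop walking the rest of the range */
			break;
		}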
Thanks,
Naoya Horiguchi
> + } else
> + break;
> + }
> + } while (pte++, addr += PAGE_SIZE, addr != end);
> + pte_unmap_unlock(pte - 1, ptl);
> + return err;
> +}
> +
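(For readers following along: thp_split() has a pmd_entry-callback
signature, so presumably mlock_fixup() drives it via walk_page_range().
A rough sketch of that wiring; the helper name below is made up, and
this assumes a pagewalk API that fills in walk->vma, which the callback
above relies on:)

	static int thp_split_vma_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
	{
		/* Sketch only: hook thp_split() up as the pmd_entry callback. */
		struct mm_walk thp_split_walk = {
			.mm		= vma->vm_mm,
			.pmd_entry	= thp_split,
		};

		/* Walk the to-be-mlocked range and split any THPs backing it. */
		return walk_page_range(start, end, &thp_split_walk);
	}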
> /*
> * mlock_fixup - handle mlock[all]/munlock[all] requests.
> *