Re: [PATCH -V7 04/14] hugetlb: use mmu_gather instead of a temporary linked list for accumulating pages

From: Aneesh Kumar K.V
Date: Thu May 31 2012 - 01:36:50 EST


On Wed, May 30, 2012 at 06:56:36PM -0700, David Rientjes wrote:
> On Wed, 30 May 2012, Aneesh Kumar K.V wrote:
>
> > diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
> > index cc9281b..ff233e4 100644
> > --- a/fs/hugetlbfs/inode.c
> > +++ b/fs/hugetlbfs/inode.c
> > @@ -416,8 +416,8 @@ hugetlb_vmtruncate_list(struct prio_tree_root *root, pgoff_t pgoff)
> > else
> > v_offset = 0;
> >
> > - __unmap_hugepage_range(vma,
> > - vma->vm_start + v_offset, vma->vm_end, NULL);
> > + unmap_hugepage_range(vma, vma->vm_start + v_offset,
> > + vma->vm_end, NULL);
> > }
> > }
> >
> > diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> > index 217f528..c21e136 100644
> > --- a/include/linux/hugetlb.h
> > +++ b/include/linux/hugetlb.h
> > @@ -7,6 +7,7 @@
> >
> > struct ctl_table;
> > struct user_struct;
> > +struct mmu_gather;
> >
> > #ifdef CONFIG_HUGETLB_PAGE
> >
> > @@ -40,9 +41,10 @@ int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
> > struct page **, struct vm_area_struct **,
> > unsigned long *, int *, int, unsigned int flags);
> > void unmap_hugepage_range(struct vm_area_struct *,
> > - unsigned long, unsigned long, struct page *);
> > -void __unmap_hugepage_range(struct vm_area_struct *,
> > - unsigned long, unsigned long, struct page *);
> > + unsigned long, unsigned long, struct page *);
> > +void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vms,
>
> s/vms/vma/

done

>
> > + unsigned long start, unsigned long end,
> > + struct page *ref_page);
> > int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
> > void hugetlb_report_meminfo(struct seq_file *);
> > int hugetlb_report_node_meminfo(int, char *);
> > @@ -98,7 +100,6 @@ static inline unsigned long hugetlb_total_pages(void)
> > #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
> > #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
> > #define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
> > -#define unmap_hugepage_range(vma, start, end, page) BUG()
> > static inline void hugetlb_report_meminfo(struct seq_file *m)
> > {
> > }
>
> Why?

unmap_hugepage_range() is no longer used anywhere outside CONFIG_HUGETLB_PAGE:
its callers are in fs/hugetlbfs/inode.c (see the first hunk above) and in
mm/hugetlb.c itself, both of which are built only when hugetlb is enabled, so
the BUG() stub was dead code.

>
> > @@ -112,13 +113,24 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
> > #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
> > #define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
> > #define huge_pte_offset(mm, address) 0
> > -#define dequeue_hwpoisoned_huge_page(page) 0
> > +static inline int dequeue_hwpoisoned_huge_page(struct page *page)
> > +{
> > + return 0;
> > +}
> > +
>
> Unrelated from this patchset.

It throws a warning. Yes, it could be a separate patch, but I wasn't sure
whether it was worth splitting a one-line change out of the series.
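
I don't have the warning text in front of me, but if it is the usual
"statement with no effect" complaint about a value-returning macro used as a
plain statement, this minimal user-space illustration (function names are
mine, not kernel code) shows the difference:

	/* build with: gcc -Wall demo.c */
	#include <stdio.h>

	/* old style: value macro, warns when used as a bare statement */
	#define dequeue_hwpoisoned_huge_page_old(page)	0

	/* new style: static inline, same semantics, no warning */
	static inline int dequeue_hwpoisoned_huge_page_new(void *page)
	{
		return 0;
	}

	int main(void)
	{
		void *page = NULL;

		dequeue_hwpoisoned_huge_page_old(page);	/* "statement with no effect" */
		dequeue_hwpoisoned_huge_page_new(page);	/* silent */
		printf("done\n");
		return 0;
	}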

>
> > static inline void copy_huge_page(struct page *dst, struct page *src)
> > {
> > }
> >
> > #define hugetlb_change_protection(vma, address, end, newprot)
> >
> > +static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
> > + struct vm_area_struct *vma, unsigned long start,
> > + unsigned long end, struct page *ref_page)
> > +{
> > + BUG();
> > +}
> > +
>
> I think this should be done under the unmap_hugepage_range() definition
> you removed (and change it to be a static inline function as well).

Below is what unmap_hugepage_range() looks like after all the changes. It is
not used at all when CONFIG_HUGETLB_PAGE is disabled. __unmap_hugepage_range(),
on the other hand, is called from common code, so it needs a stub there; being
reached with hugetlb not enabled implies a bug, hence the BUG().

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page)
{
	struct mm_struct *mm;
	struct mmu_gather tlb;

	mm = vma->vm_mm;

	tlb_gather_mmu(&tlb, mm, 0);
	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
	tlb_finish_mmu(&tlb, start, end);
}
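
For reference, the mmu_gather lifecycle the conversion relies on is roughly
the following (annotated sketch of how the existing APIs fit together, not
part of the patch):

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, 0);	/* start batching; 0 == not a full-mm teardown */
	...
	/* per pte: queue the page for freeing; returns 0 once the batch is full */
	if (!__tlb_remove_page(&tlb, page))
		tlb_flush_mmu(&tlb);	/* flush the TLB and free the queued pages */
	...
	tlb_finish_mmu(&tlb, start, end);	/* final flush and teardown */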



>
> > #endif /* !CONFIG_HUGETLB_PAGE */
> >
> > #define HUGETLB_ANON_FILE "anon_hugepage"
> > diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> > index 9b97a5c..704a269 100644
> > --- a/mm/hugetlb.c
> > +++ b/mm/hugetlb.c
> > @@ -24,8 +24,9 @@
> >
> > #include <asm/page.h>
> > #include <asm/pgtable.h>
> > -#include <linux/io.h>
> > +#include <asm/tlb.h>
> >
> > +#include <linux/io.h>
> > #include <linux/hugetlb.h>
> > #include <linux/node.h>
> > #include "internal.h"
> > @@ -2310,30 +2311,26 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
> > return 0;
> > }
> >
> > -void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
> > - unsigned long end, struct page *ref_page)
> > +void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
> > + unsigned long start, unsigned long end,
> > + struct page *ref_page)
> > {
> > + int force_flush = 0;
>
> Can this be bool?
>
> > struct mm_struct *mm = vma->vm_mm;
> > unsigned long address;
> > pte_t *ptep;
> > pte_t pte;
> > struct page *page;
> > - struct page *tmp;
> > struct hstate *h = hstate_vma(vma);
> > unsigned long sz = huge_page_size(h);
> >
> > - /*
> > - * A page gathering list, protected by per file i_mmap_mutex. The
> > - * lock is used to avoid list corruption from multiple unmapping
> > - * of the same page since we are using page->lru.
> > - */
> > - LIST_HEAD(page_list);
> > -
> > WARN_ON(!is_vm_hugetlb_page(vma));
> > BUG_ON(start & ~huge_page_mask(h));
> > BUG_ON(end & ~huge_page_mask(h));
> >
> > + tlb_start_vma(tlb, vma);
> > mmu_notifier_invalidate_range_start(mm, start, end);
> > +again:
> > spin_lock(&mm->page_table_lock);
> > for (address = start; address < end; address += sz) {
> > ptep = huge_pte_offset(mm, address);
> > @@ -2372,30 +2369,45 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
> > }
> >
> > pte = huge_ptep_get_and_clear(mm, address, ptep);
> > + tlb_remove_tlb_entry(tlb, ptep, address);
> > if (pte_dirty(pte))
> > set_page_dirty(page);
> > - list_add(&page->lru, &page_list);
> >
> > + page_remove_rmap(page);
> > + force_flush = !__tlb_remove_page(tlb, page);
> > + if (force_flush)
> > + break;
> > /* Bail out after unmapping reference page if supplied */
> > if (ref_page)
> > break;
> > }
> > - flush_tlb_range(vma, start, end);
> > spin_unlock(&mm->page_table_lock);
> > - mmu_notifier_invalidate_range_end(mm, start, end);
> > - list_for_each_entry_safe(page, tmp, &page_list, lru) {
> > - page_remove_rmap(page);
> > - list_del(&page->lru);
> > - put_page(page);
> > + /*
> > + * mmu_gather ran out of room to batch pages, we break out of
> > + * the PTE lock to avoid doing the potential expensive TLB invalidate
> > + * and page-free while holding it.
> > + */
> > + if (force_flush) {
> > + force_flush = 0;
> > + tlb_flush_mmu(tlb);
> > + if (address < end && !ref_page)
> > + goto again;
>
> Shouldn't be copying "start" at the beginning of this function and then
> updating that copy now and use it as the loop initialization?
>

I didn't want to make larger changes in this patch. The goal was to switch to
the mmu_gather API without changing the rest of the loop logic.
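
If you mean something like the below (untested sketch, only to make sure I
read the suggestion right), I'd prefer to do that as a separate cleanup on
top:

	unsigned long address = start;	/* also serves as the restart point */
	...
again:
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += sz) {
		...
	}
	spin_unlock(&mm->page_table_lock);
	if (force_flush) {
		force_flush = 0;
		tlb_flush_mmu(tlb);
		/* resume where we stopped instead of re-walking from start */
		if (address < end && !ref_page)
			goto again;
	}

With the current code the goto re-walks from start and the already-cleared
ptes are simply skipped, so the two forms should behave the same; the above
just avoids the re-scan.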

> > }
> > + mmu_notifier_invalidate_range_end(mm, start, end);
> > + tlb_end_vma(tlb, vma);
> > }
> >
> > void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
> > unsigned long end, struct page *ref_page)
> > {
> > - mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
> > - __unmap_hugepage_range(vma, start, end, ref_page);
> > - mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
> > + struct mm_struct *mm;
> > + struct mmu_gather tlb;
> > +
> > + mm = vma->vm_mm;
> > +
> > + tlb_gather_mmu(&tlb, mm, 0);
> > + __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
> > + tlb_finish_mmu(&tlb, start, end);
> > }
> >
> > /*
>


-aneesh
