Re: [External] Re: [PATCH v5 18/21] mm/hugetlb: Merge pte to huge pmd only for gigantic page
From: Muchun Song
Date: Fri Nov 20 2020 - 05:42:07 EST
On Fri, Nov 20, 2020 at 4:24 PM Michal Hocko <mhocko@xxxxxxxx> wrote:
>
> On Fri 20-11-20 14:43:22, Muchun Song wrote:
> > Merge the PTEs back to a huge PMD if it has ever been split. For now,
> > only support gigantic pages whose vmemmap size is an integer multiple
> > of PMD_SIZE. This is the simplest case to handle.
>
> I think it would be beneficial for anybody who plans to implement this
> for normal PMDs to document the challenges while you still have them
> fresh in your mind.
Yeah, I agree with you. I will document it.
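
To make the constraint concrete: whether the vmemmap of a huge page can
be merged back PMD by PMD depends only on its size relative to PMD_SIZE.
Here is a rough userspace sketch of the arithmetic (the constants assume
x86_64 with 4K base pages, a 64-byte struct page and 2M PMD mappings;
they are illustrative, not taken from this patch):

#include <stdio.h>

int main(void)
{
        const unsigned long base_page = 4096;      /* 4K base page */
        const unsigned long struct_page = 64;      /* assumed sizeof(struct page) */
        const unsigned long pmd_size = 2UL << 20;  /* 2M PMD mapping */
        const unsigned long hpages[] = { 2UL << 20, 1UL << 30 };

        for (int i = 0; i < 2; i++) {
                /* vmemmap bytes = number of struct pages * sizeof(struct page) */
                unsigned long vmemmap = hpages[i] / base_page * struct_page;

                printf("%luM huge page: %lu KB of vmemmap, %s multiple of PMD_SIZE\n",
                       hpages[i] >> 20, vmemmap >> 10,
                       vmemmap % pmd_size == 0 ? "an integer" : "not a");
        }
        return 0;
}

A 1G gigantic page has 16M of vmemmap, exactly 8 PMD mappings, so each
PMD can be remapped and merged independently. A 2M huge page has only
32K of vmemmap, so a single PMD-mapped vmemmap range is shared by many
huge pages and merging would need coordination among all of them.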
>
> > Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
> > ---
> > arch/x86/include/asm/hugetlb.h |   8 +++
> > mm/hugetlb_vmemmap.c           | 118 ++++++++++++++++++++++++++++++++++++++++-
> > 2 files changed, 124 insertions(+), 2 deletions(-)
> >
> > diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
> > index c601fe042832..1de1c519a84a 100644
> > --- a/arch/x86/include/asm/hugetlb.h
> > +++ b/arch/x86/include/asm/hugetlb.h
> > @@ -12,6 +12,14 @@ static inline bool vmemmap_pmd_huge(pmd_t *pmd)
> > {
> >         return pmd_large(*pmd);
> > }
> > +
> > +#define vmemmap_pmd_mkhuge vmemmap_pmd_mkhuge
> > +static inline pmd_t vmemmap_pmd_mkhuge(struct page *page)
> > +{
> > +        pte_t entry = pfn_pte(page_to_pfn(page), PAGE_KERNEL_LARGE);
> > +
> > +        return __pmd(pte_val(entry));
> > +}
> > #endif
> >
> > #define hugepages_supported() boot_cpu_has(X86_FEATURE_PSE)
> > diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
> > index c958699d1393..bf2b6b3e75af 100644
> > --- a/mm/hugetlb_vmemmap.c
> > +++ b/mm/hugetlb_vmemmap.c
> > @@ -144,6 +144,14 @@ static inline bool vmemmap_pmd_huge(pmd_t *pmd)
> > }
> > #endif
> >
> > +#ifndef vmemmap_pmd_mkhuge
> > +#define vmemmap_pmd_mkhuge vmemmap_pmd_mkhuge
> > +static inline pmd_t vmemmap_pmd_mkhuge(struct page *page)
> > +{
> > +        return pmd_mkhuge(mk_pmd(page, PAGE_KERNEL));
> > +}
> > +#endif
> > +
> > static bool hugetlb_free_vmemmap_disabled __initdata;
> >
> > static int __init early_hugetlb_free_vmemmap_param(char *buf)
> > @@ -422,6 +430,104 @@ static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
> >         }
> > }
> >
> > +static void __replace_huge_page_pte_vmemmap(pte_t *ptep, unsigned long start,
> > +                                            unsigned int nr, struct page *huge,
> > +                                            struct list_head *free_pages)
> > +{
> > +        unsigned long addr;
> > +        unsigned long end = start + (nr << PAGE_SHIFT);
> > +        pgprot_t pgprot = PAGE_KERNEL;
> > +
> > +        for (addr = start; addr < end; addr += PAGE_SIZE, ptep++) {
> > +                struct page *page;
> > +                pte_t old = *ptep;
> > +                pte_t entry;
> > +
> > +                prepare_vmemmap_page(huge);
> > +
> > +                entry = mk_pte(huge++, pgprot);
> > +                VM_WARN_ON(!pte_present(old));
> > +                page = pte_page(old);
> > +                list_add(&page->lru, free_pages);
> > +
> > +                set_pte_at(&init_mm, addr, ptep, entry);
> > +        }
> > +}
> > +
> > +static void replace_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
> > +                                          struct page *huge,
> > +                                          struct list_head *free_pages)
> > +{
> > +        unsigned long end = start + VMEMMAP_HPAGE_SIZE;
> > +
> > +        flush_cache_vunmap(start, end);
> > +        __replace_huge_page_pte_vmemmap(pte_offset_kernel(pmd, start), start,
> > +                                        VMEMMAP_HPAGE_NR, huge, free_pages);
> > +        flush_tlb_kernel_range(start, end);
> > +}
> > +
> > +static pte_t *merge_vmemmap_pte(pmd_t *pmdp, unsigned long addr)
> > +{
> > +        pte_t *pte;
> > +        struct page *page;
> > +
> > +        pte = pte_offset_kernel(pmdp, addr);
> > +        page = pte_page(*pte);
> > +        set_pmd(pmdp, vmemmap_pmd_mkhuge(page));
> > +
> > +        return pte;
> > +}
> > +
> > +static void merge_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
> > +                                        struct page *huge,
> > +                                        struct list_head *free_pages)
> > +{
> > +        replace_huge_page_pmd_vmemmap(pmd, start, huge, free_pages);
> > +        pte_free_kernel(&init_mm, merge_vmemmap_pte(pmd, start));
> > +        flush_tlb_kernel_range(start, start + VMEMMAP_HPAGE_SIZE);
> > +}
> > +
> > +static inline void dissolve_compound_page(struct page *page, unsigned int order)
> > +{
> > +        int i;
> > +        unsigned int nr_pages = 1 << order;
> > +
> > +        for (i = 1; i < nr_pages; i++)
> > +                set_page_count(page + i, 1);
> > +}
> > +
> > +static void merge_gigantic_page_vmemmap(struct hstate *h, struct page *head,
> > +                                        pmd_t *pmd)
> > +{
> > +        LIST_HEAD(free_pages);
> > +        unsigned long addr = (unsigned long)head;
> > +        unsigned long end = addr + vmemmap_pages_size_per_hpage(h);
> > +
> > +        for (; addr < end; addr += VMEMMAP_HPAGE_SIZE) {
> > +                void *to;
> > +                struct page *page;
> > +
> > +                page = alloc_pages(GFP_VMEMMAP_PAGE & ~__GFP_NOFAIL,
> > +                                   VMEMMAP_HPAGE_ORDER);
> > +                if (!page)
> > +                        goto out;
> > +
> > +                dissolve_compound_page(page, VMEMMAP_HPAGE_ORDER);
> > +                to = page_to_virt(page);
> > +                memcpy(to, (void *)addr, VMEMMAP_HPAGE_SIZE);
> > +
> > +                /*
> > +                 * Make sure that any data written to @to is made
> > +                 * visible to the physical page.
> > +                 */
> > +                flush_kernel_vmap_range(to, VMEMMAP_HPAGE_SIZE);
> > +
> > +                merge_huge_page_pmd_vmemmap(pmd++, addr, page, &free_pages);
> > +        }
> > +out:
> > +        free_vmemmap_page_list(&free_pages);
> > +}
> > +
> > static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
> > {
> >         int i;
> > @@ -454,10 +560,18 @@ void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
> >                                     __remap_huge_page_pte_vmemmap);
> >         if (!freed_vmemmap_hpage_dec(pmd_page(*pmd)) && pmd_split(pmd)) {
> >                 /*
> > -                * Todo:
> > -                * Merge pte to huge pmd if it has ever been split.
> > +                * Merge the PTEs back to a huge PMD if it has ever
> > +                * been split. For now, only support gigantic pages
> > +                * whose vmemmap size is an integer multiple of
> > +                * PMD_SIZE. This is the simplest case to handle.
> >                  */
> >                 clear_pmd_split(pmd);
> > +
> > +                if (IS_ALIGNED(vmemmap_pages_per_hpage(h), VMEMMAP_HPAGE_NR)) {
> > +                        spin_unlock(ptl);
> > +                        merge_gigantic_page_vmemmap(h, head, pmd);
> > +                        return;
> > +                }
> >         }
> >         spin_unlock(ptl);
> > }
> > --
> > 2.11.0
>
> --
> Michal Hocko
> SUSE Labs
--
Yours,
Muchun