Re: [RFC PATCH 3/8] mm/vmalloc: Extend vmap_small_pages_range_noflush() to support larger page_shift sizes
From: Dev Jain
Date: Wed Apr 08 2026 - 07:08:54 EST
On 08/04/26 8:21 am, Barry Song (Xiaomi) wrote:
> vmap_small_pages_range_noflush() provides a clean interface by taking
> struct page **pages and mapping them via direct PTE iteration. This
> avoids the page table zigzag seen when using
"Zigzag" is ambiguous. Just say "page table rewalk". Also please
elaborate on why the rewalk is happening currently.
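For context, my understanding of where the rewalk comes from: for
page_shift > PAGE_SHIFT, __vmap_pages_range_noflush() currently maps
one page_shift-sized chunk per call to vmap_range_noflush(), so every
iteration restarts the walk from the PGD instead of continuing where
the previous chunk left off. Roughly (paraphrasing current
mm/vmalloc.c, not your patch):

	/* One full pgd -> p4d -> pud -> pmd walk per huge chunk. */
	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
		int err;

		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
					 page_to_phys(pages[i]), prot,
					 page_shift);
		if (err)
			return err;

		addr += 1UL << page_shift;
	}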
> vmap_range_noflush() for page_shift values other than PAGE_SHIFT.
>
> Extend it to support larger page_shift values, and add PMD- and
> contiguous-PTE mappings as well.
So we can drop the "small" here since now it supports larger chunks
as well.
Also, at this point the code you add is a no-op, since the only caller
still passes PAGE_SHIFT. Let's just squash patch 4 into this one; it
looks odd for this patch to retain the page-table-rewalk algorithm
when it literally adds the functionality to avoid it.
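To make the squash concrete, I'd expect the end state (this patch plus
patch 4 folded in) to have the caller pass page_shift straight
through, something like this (my sketch, not necessarily patch 4
verbatim):

	/*
	 * Sketch only: with the walk below handling PMD and contig-PTE
	 * sizes itself, the per-chunk vmap_range_noflush() loop can go
	 * away and the shift is passed through unconditionally.
	 */
	return vmap_small_pages_range_noflush(addr, end, prot, pages,
					      page_shift);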
>
> Signed-off-by: Barry Song (Xiaomi) <baohua@xxxxxxxxxx>
> ---
> mm/vmalloc.c | 54 ++++++++++++++++++++++++++++++++++++++++------------
> 1 file changed, 42 insertions(+), 12 deletions(-)
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 57eae99d9909..5bf072297536 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -524,8 +524,9 @@ void vunmap_range(unsigned long addr, unsigned long end)
>
> static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
> unsigned long end, pgprot_t prot, struct page **pages, int *nr,
> - pgtbl_mod_mask *mask)
> + pgtbl_mod_mask *mask, unsigned int shift)
> {
> + unsigned int steps = 1;
> int err = 0;
> pte_t *pte;
>
> @@ -543,6 +544,7 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
> do {
> struct page *page = pages[*nr];
>
> + steps = 1;
> if (WARN_ON(!pte_none(ptep_get(pte)))) {
> err = -EBUSY;
> break;
> @@ -556,9 +558,24 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
> break;
> }
>
> +#ifdef CONFIG_HUGETLB_PAGE
> + if (shift != PAGE_SHIFT) {
> + unsigned long pfn = page_to_pfn(page), size;
> +
> + size = arch_vmap_pte_range_map_size(addr, end, pfn, shift);
> + if (size != PAGE_SIZE) {
> + steps = size >> PAGE_SHIFT;
> + pte_t entry = pfn_pte(pfn, prot);
> +
> + entry = arch_make_huge_pte(entry, ilog2(size), 0);
> + set_huge_pte_at(&init_mm, addr, pte, entry, size);
> + continue;
> + }
> + }
> +#endif
> +
> set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
> - (*nr)++;
> - } while (pte++, addr += PAGE_SIZE, addr != end);
> + } while (pte += steps, *nr += steps, addr += PAGE_SIZE * steps, addr != end);
>
> lazy_mmu_mode_disable();
> *mask |= PGTBL_PTE_MODIFIED;
> @@ -568,7 +585,7 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
>
> static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
> unsigned long end, pgprot_t prot, struct page **pages, int *nr,
> - pgtbl_mod_mask *mask)
> + pgtbl_mod_mask *mask, unsigned int shift)
> {
> pmd_t *pmd;
> unsigned long next;
> @@ -578,7 +595,20 @@ static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
> return -ENOMEM;
> do {
> next = pmd_addr_end(addr, end);
> - if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
> +
> + if (shift == PMD_SHIFT) {
> + struct page *page = pages[*nr];
> + phys_addr_t phys_addr = page_to_phys(page);
> +
> + if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
> + shift)) {
> + *mask |= PGTBL_PMD_MODIFIED;
> + *nr += 1 << (shift - PAGE_SHIFT);
> + continue;
> + }
> + }
> +
> + if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask, shift))
> return -ENOMEM;
> } while (pmd++, addr = next, addr != end);
> return 0;
> @@ -586,7 +616,7 @@ static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
>
> static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
> unsigned long end, pgprot_t prot, struct page **pages, int *nr,
> - pgtbl_mod_mask *mask)
> + pgtbl_mod_mask *mask, unsigned int shift)
> {
> pud_t *pud;
> unsigned long next;
> @@ -596,7 +626,7 @@ static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
> return -ENOMEM;
> do {
> next = pud_addr_end(addr, end);
> - if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
> + if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask, shift))
> return -ENOMEM;
> } while (pud++, addr = next, addr != end);
> return 0;
> @@ -604,7 +634,7 @@ static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
>
> static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
> unsigned long end, pgprot_t prot, struct page **pages, int *nr,
> - pgtbl_mod_mask *mask)
> + pgtbl_mod_mask *mask, unsigned int shift)
> {
> p4d_t *p4d;
> unsigned long next;
> @@ -614,14 +644,14 @@ static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
> return -ENOMEM;
> do {
> next = p4d_addr_end(addr, end);
> - if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
> + if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask, shift))
> return -ENOMEM;
> } while (p4d++, addr = next, addr != end);
> return 0;
> }
>
> static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
> - pgprot_t prot, struct page **pages)
> + pgprot_t prot, struct page **pages, unsigned int shift)
> {
> unsigned long start = addr;
> pgd_t *pgd;
> @@ -636,7 +666,7 @@ static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
> next = pgd_addr_end(addr, end);
> if (pgd_bad(*pgd))
> mask |= PGTBL_PGD_MODIFIED;
> - err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
> + err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask, shift);
> if (err)
> break;
> } while (pgd++, addr = next, addr != end);
> @@ -665,7 +695,7 @@ int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
>
> if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
> page_shift == PAGE_SHIFT)
> - return vmap_small_pages_range_noflush(addr, end, prot, pages);
> + return vmap_small_pages_range_noflush(addr, end, prot, pages, PAGE_SHIFT);
>
> for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
> int err;
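
One more note for other reviewers: the contig-PTE case in
vmap_pages_pte_range() hinges on arch_vmap_pte_range_map_size(). On
arm64 that helper is roughly the following (paraphrased from memory;
see arch/arm64/include/asm/vmalloc.h for the exact code), which is why
size comes back as PAGE_SIZE unless a whole naturally aligned CONT_PTE
block fits:

	static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr,
					unsigned long end, u64 pfn,
					unsigned int max_page_shift)
	{
		/*
		 * Use PTE_CONT only when a full block fits and both the
		 * virtual and physical addresses are naturally aligned.
		 */
		if (max_page_shift < CONT_PTE_SHIFT)
			return PAGE_SIZE;
		if (end - addr < CONT_PTE_SIZE)
			return PAGE_SIZE;
		if (!IS_ALIGNED(addr, CONT_PTE_SIZE))
			return PAGE_SIZE;
		if (!IS_ALIGNED(PFN_PHYS(pfn), CONT_PTE_SIZE))
			return PAGE_SIZE;
		return CONT_PTE_SIZE;
	}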