Re: [patch 105/147] arm64: switch to generic version of pte allocation

From: Qian Cai
Date: Fri Jul 12 2019 - 08:49:16 EST


Actually, this patch is slightly off. There is one delta that still needs to be
applied on top (ignore the part touching pgtable.h, which is already in mainline
via commit 615c48ad8f42 "arm64/mm: don't initialize pgd_cache twice"):

https://lore.kernel.org/linux-mm/20190617151252.GF16810@rapoport-lnx/
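
(If I am reading that fixup right, the remaining delta is in the pgd_alloc()
hunk of arch/arm64/mm/pgd.c: as posted below, the kmem_cache_alloc() path still
hard-codes GFP_PGTABLE_KERNEL, which would drop __GFP_ACCOUNT for user pgds
whenever PGD_SIZE != PAGE_SIZE, and the fixup switches that call over to the
local "gfp". That is only my reading, though - the lore link above is the
authoritative version.)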

On Thu, 2019-07-11 at 20:58 -0700, akpm@xxxxxxxxxxxxxxxxxxxx wrote:
> From: Mike Rapoport <rppt@xxxxxxxxxxxxx>
> Subject: arm64: switch to generic version of pte allocation
>
> The PTE allocations in arm64 are identical to the generic ones modulo the
> GFP flags.
>
> Using the generic pte_alloc_one() functions ensures that the user page
> tables are allocated with __GFP_ACCOUNT set.
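
[ For context, the generic helpers that arm64 now picks up from
  <asm-generic/pgalloc.h> (added earlier in this series) look roughly like
  this - quoting from memory, so treat it as a sketch rather than the exact
  code:

	static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
	{
		/* kernel PTE page: zeroed, not memcg-accounted */
		return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
	}

	static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
	{
		struct page *pte;

		/* user PTE page: GFP_PGTABLE_USER includes __GFP_ACCOUNT */
		pte = alloc_page(GFP_PGTABLE_USER);
		if (!pte)
			return NULL;
		if (!pgtable_page_ctor(pte)) {
			__free_page(pte);
			return NULL;
		}
		return pte;
	}

  Since GFP_PGTABLE_USER carries __GFP_ACCOUNT, user PTE pages get charged to
  the memcg without arm64 having to do anything special. ]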
>
> The arm64 definition of PGALLOC_GFP is removed and replaced with
> GFP_PGTABLE_USER for p[gum]d_alloc_one() for the user page tables and
> GFP_PGTABLE_KERNEL for the kernel page tables. The KVM memory cache is now
> using GFP_PGTABLE_USER.
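
[ For reference, the new flags introduced earlier in the series expand to
  roughly the following (from memory - double-check against
  include/linux/gfp.h):

	#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
	#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)

  so GFP_PGTABLE_KERNEL matches the old PGALLOC_GFP, and the only difference
  for user page tables is the extra __GFP_ACCOUNT memcg charging. ]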
>
> The mappings created with create_pgd_mapping() are now using
> GFP_PGTABLE_KERNEL.
>
> The conversion to the generic version of pte_free_kernel() removes the NULL
> check for pte.
>
> The pte_free() version on arm64 is identical to the generic one and
> can be simply dropped.
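
[ For completeness, the generic pte_free_kernel()/pte_free() are, as far as I
  can tell, the same as what is deleted below minus the NULL check, something
  like:

	static inline void pte_free_kernel(struct mm_struct *mm, pte_t *ptep)
	{
		free_page((unsigned long)ptep);
	}

	static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
	{
		pgtable_page_dtor(pte);
		__free_page(pte);
	}

  free_pages() already ignores a zero address, so dropping the explicit NULL
  check does not change behaviour. ]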
>
> [cai@xxxxxx: fix a bogus GFP flag in pgd_alloc()]
>  Link: http://lkml.kernel.org/r/1559656836-24940-1-git-send-email-cai@xxxxxx
> Link: http://lkml.kernel.org/r/1557296232-15361-5-git-send-email-rppt@xxxxxxxxm.com
> Signed-off-by: Mike Rapoport <rppt@xxxxxxxxxxxxx>
> Cc: Albert Ou <aou@xxxxxxxxxxxxxxxxx>
> Cc: Anshuman Khandual <anshuman.khandual@xxxxxxx>
> Cc: Anton Ivanov <anton.ivanov@xxxxxxxxxxxxxxxxxx>
> Cc: Arnd Bergmann <arnd@xxxxxxxx>
> Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
> Cc: Geert Uytterhoeven <geert@xxxxxxxxxxxxxx>
> Cc: Greentime Hu <green.hu@xxxxxxxxx>
> Cc: Guan Xuetao <gxt@xxxxxxxxxx>
> Cc: Guo Ren <guoren@xxxxxxxxxx>
> Cc: Guo Ren <ren_guo@xxxxxxxxx>
> Cc: Helge Deller <deller@xxxxxx>
> Cc: Ley Foon Tan <lftan@xxxxxxxxxx>
> Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
> Cc: Matt Turner <mattst88@xxxxxxxxx>
> Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
> Cc: Michal Hocko <mhocko@xxxxxxxx>
> Cc: Palmer Dabbelt <palmer@xxxxxxxxxx>
> Cc: Paul Burton <paul.burton@xxxxxxxx>
> Cc: Ralf Baechle <ralf@xxxxxxxxxxxxxx>
> Cc: Richard Kuo <rkuo@xxxxxxxxxxxxxx>
> Cc: Richard Weinberger <richard@xxxxxx>
> Cc: Russell King <linux@xxxxxxxxxxxxxxx>
> Cc: Sam Creasey <sammy@xxxxxxxxx>
> Cc: Vincent Chen <deanbo422@xxxxxxxxx>
> Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
> ---
>
>  arch/arm64/include/asm/pgalloc.h |   47 ++++-------------------------
>  arch/arm64/mm/mmu.c              |    2 -
>  arch/arm64/mm/pgd.c              |    9 ++++-
>  virt/kvm/arm/mmu.c               |    2 -
>  4 files changed, 17 insertions(+), 43 deletions(-)
>
> --- a/arch/arm64/include/asm/pgalloc.h~arm64-switch-to-generic-version-of-pte-allocation
> +++ a/arch/arm64/include/asm/pgalloc.h
> @@ -13,18 +13,23 @@
>  #include <asm/cacheflush.h>
>  #include <asm/tlbflush.h>
>  
> +#include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */
> +
>  #define check_pgt_cache()		do { } while (0)
>  
> -#define PGALLOC_GFP	(GFP_KERNEL | __GFP_ZERO)
>  #define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
>  
>  #if CONFIG_PGTABLE_LEVELS > 2
>  
>  static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
>  {
> +	gfp_t gfp = GFP_PGTABLE_USER;
>  	struct page *page;
>  
> -	page = alloc_page(PGALLOC_GFP);
> +	if (mm == &init_mm)
> +		gfp = GFP_PGTABLE_KERNEL;
> +
> +	page = alloc_page(gfp);
>  	if (!page)
>  		return NULL;
>  	if (!pgtable_pmd_page_ctor(page)) {
> @@ -61,7 +66,7 @@ static inline void __pud_populate(pud_t
>  
>  static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
>  {
> -	return (pud_t *)__get_free_page(PGALLOC_GFP);
> +	return (pud_t *)__get_free_page(GFP_PGTABLE_USER);
>  }
>  
>  static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
> @@ -89,42 +94,6 @@ static inline void __pgd_populate(pgd_t
>  extern pgd_t *pgd_alloc(struct mm_struct *mm);
>  extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
>  
> -static inline pte_t *
> -pte_alloc_one_kernel(struct mm_struct *mm)
> -{
> -	return (pte_t *)__get_free_page(PGALLOC_GFP);
> -}
> -
> -static inline pgtable_t
> -pte_alloc_one(struct mm_struct *mm)
> -{
> -	struct page *pte;
> -
> -	pte = alloc_pages(PGALLOC_GFP, 0);
> -	if (!pte)
> -		return NULL;
> -	if (!pgtable_page_ctor(pte)) {
> -		__free_page(pte);
> -		return NULL;
> -	}
> -	return pte;
> -}
> -
> -/*
> - * Free a PTE table.
> - */
> -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *ptep)
> -{
> -	if (ptep)
> -		free_page((unsigned long)ptep);
> -}
> -
> -static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
> -{
> -	pgtable_page_dtor(pte);
> -	__free_page(pte);
> -}
> -
>  static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
>  				  pmdval_t prot)
>  {
> --- a/arch/arm64/mm/mmu.c~arm64-switch-to-generic-version-of-pte-allocation
> +++ a/arch/arm64/mm/mmu.c
> @@ -362,7 +362,7 @@ static void __create_pgd_mapping(pgd_t *
>  
>  static phys_addr_t __pgd_pgtable_alloc(int shift)
>  {
> -	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
> +	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
>  	BUG_ON(!ptr);
>  
>  	/* Ensure the zeroed page is visible to the page table walker */
> --- a/arch/arm64/mm/pgd.c~arm64-switch-to-generic-version-of-pte-allocation
> +++ a/arch/arm64/mm/pgd.c
> @@ -19,10 +19,15 @@ static struct kmem_cache *pgd_cache __ro
>  
>  pgd_t *pgd_alloc(struct mm_struct *mm)
>  {
> +	gfp_t gfp = GFP_PGTABLE_USER;
> +
> +	if (unlikely(mm == &init_mm))
> +		gfp = GFP_PGTABLE_KERNEL;
> +
>  	if (PGD_SIZE == PAGE_SIZE)
> -		return (pgd_t *)__get_free_page(PGALLOC_GFP);
> +		return (pgd_t *)__get_free_page(gfp);
>  	else
> -		return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
> +		return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_KERNEL);
>  }
>  
>  void pgd_free(struct mm_struct *mm, pgd_t *pgd)
> --- a/virt/kvm/arm/mmu.c~arm64-switch-to-generic-version-of-pte-allocation
> +++ a/virt/kvm/arm/mmu.c
> @@ -129,7 +129,7 @@ static int mmu_topup_memory_cache(struct
>  	if (cache->nobjs >= min)
>  		return 0;
>  	while (cache->nobjs < max) {
> -		page = (void *)__get_free_page(PGALLOC_GFP);
> +		page = (void *)__get_free_page(GFP_PGTABLE_USER);
>  		if (!page)
>  			return -ENOMEM;
>  		cache->objects[cache->nobjs++] = page;
> _