Re: [PATCH v1 2/3] mm/hugetlb: enforce that PMD PT sharing has split PMD PT locks

From: Mike Rapoport
Date: Sun Jul 28 2024 - 08:47:50 EST


On Fri, Jul 26, 2024 at 05:07:27PM +0200, David Hildenbrand wrote:
> Sharing page tables between processes but falling back to per-MM page
> table locks cannot possibly work.
>
> So, let's make sure that we do have split PMD locks by adding a new
> Kconfig option and letting that depend on CONFIG_SPLIT_PMD_PTLOCKS.
>
> Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>

Acked-by: Mike Rapoport (Microsoft) <rppt@xxxxxxxxxx>
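
Just to spell the dependency out for anyone skimming the thread (my sketch of
how the new stanza resolves, not extra code from the patch):

# HUGETLB_PMD_PAGE_TABLE_SHARING is effectively
#   HUGETLB_PAGE && ARCH_WANT_HUGE_PMD_SHARE && SPLIT_PMD_PTLOCKS
config HUGETLB_PMD_PAGE_TABLE_SHARING
	def_bool HUGETLB_PAGE
	depends on ARCH_WANT_HUGE_PMD_SHARE && SPLIT_PMD_PTLOCKS

so the PMD sharing code is only built when the architecture has opted in and
split PMD page table locks are guaranteed; a hugetlb build without that
combination gets the !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING stubs below.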

> ---
> fs/Kconfig              | 4 ++++
> include/linux/hugetlb.h | 5 ++---
> mm/hugetlb.c            | 8 ++++----
> 3 files changed, 10 insertions(+), 7 deletions(-)
>
> diff --git a/fs/Kconfig b/fs/Kconfig
> index a46b0cbc4d8f6..0e4efec1d92e6 100644
> --- a/fs/Kconfig
> +++ b/fs/Kconfig
> @@ -288,6 +288,10 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
> 	depends on ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
> 	depends on SPARSEMEM_VMEMMAP
>
> +config HUGETLB_PMD_PAGE_TABLE_SHARING
> +	def_bool HUGETLB_PAGE
> +	depends on ARCH_WANT_HUGE_PMD_SHARE && SPLIT_PMD_PTLOCKS
> +
> config ARCH_HAS_GIGANTIC_PAGE
> 	bool
>
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index da800e56fe590..4d2f3224ff027 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -1243,7 +1243,7 @@ static inline __init void hugetlb_cma_reserve(int order)
> }
> #endif
>
> -#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
> +#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
> static inline bool hugetlb_pmd_shared(pte_t *pte)
> {
> 	return page_count(virt_to_page(pte)) > 1;
> @@ -1279,8 +1279,7 @@ bool __vma_private_lock(struct vm_area_struct *vma);
> static inline pte_t *
> hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
> {
> -#if defined(CONFIG_HUGETLB_PAGE) && \
> -	defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
> +#if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP)
> 	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
>
> 	/*
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 0858a18272073..c4d94e122c41f 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -7211,7 +7211,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
> 	return 0;
> }
>
> -#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
> +#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
> static unsigned long page_table_shareable(struct vm_area_struct *svma,
> 				struct vm_area_struct *vma,
> 				unsigned long addr, pgoff_t idx)
> @@ -7373,7 +7373,7 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
> 	return 1;
> }
>
> -#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
> +#else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
>
> pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
> 		      unsigned long addr, pud_t *pud)
> @@ -7396,7 +7396,7 @@ bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
> {
> 	return false;
> }
> -#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
> +#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
>
> #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
> pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
> @@ -7494,7 +7494,7 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
> /* See description above. Architectures can provide their own version. */
> __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
> {
> -#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
> +#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
> 	if (huge_page_size(h) == PMD_SIZE)
> 		return PUD_SIZE - PMD_SIZE;
> #endif
> --
> 2.45.2
>
>

--
Sincerely yours,
Mike.