Re: [PATCH 01/12] Revert "mm: pgtable: make ptlock be freed by RCU"

From: Yu Zhao
Date: Sat Dec 14 2024 - 13:30:25 EST


On Sat, Dec 14, 2024 at 2:03 AM Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx> wrote:
>
> This reverts commit 2f3443770437e49abc39af26962d293851cbab6d.
>
> Signed-off-by: Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx>

Bloating the split ptlock by wrapping it in struct pt_lock (an extra rcu_head per PTE page table) is unnecessary. Glad to see it reverted.
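
(For context, a rough userspace sketch of the per-ptlock size cost the
reverted commit added. This assumes a typical x86-64 config without
spinlock debugging; the spinlock_t and rcu_head definitions below are
simplified stand-ins, not the kernel ones, so the numbers are only
approximate:)

    #include <stdio.h>

    /* Simplified stand-ins for the kernel types (assumed: no lockdep). */
    typedef struct { unsigned int raw_lock; } spinlock_t;      /* ~4 bytes  */
    struct rcu_head {
            struct rcu_head *next;
            void (*func)(struct rcu_head *head);               /* ~16 bytes */
    };

    /* What the reverted commit allocated per split ptlock. */
    struct pt_lock {
            spinlock_t ptl;
            struct rcu_head rcu;
    };

    int main(void)
    {
            /* With the revert: one bare spinlock_t per split ptlock... */
            printf("spinlock_t:     %zu bytes\n", sizeof(spinlock_t));
            /* ...versus spinlock_t + rcu_head (plus padding) before it. */
            printf("struct pt_lock: %zu bytes\n", sizeof(struct pt_lock));
            return 0;
    }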

Acked-by: Yu Zhao <yuzhao@xxxxxxxxxx>


> ---
> include/linux/mm.h | 2 +-
> include/linux/mm_types.h | 9 +--------
> mm/memory.c | 22 ++++++----------------
> 3 files changed, 8 insertions(+), 25 deletions(-)
>
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index e7902980439cc..5e73e53c34e9e 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2988,7 +2988,7 @@ void ptlock_free(struct ptdesc *ptdesc);
>
> static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
> {
> - return &(ptdesc->ptl->ptl);
> + return ptdesc->ptl;
> }
> #else /* ALLOC_SPLIT_PTLOCKS */
> static inline void ptlock_cache_init(void)
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index df8f5152644ec..5d8779997266e 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -434,13 +434,6 @@ FOLIO_MATCH(flags, _flags_2a);
> FOLIO_MATCH(compound_head, _head_2a);
> #undef FOLIO_MATCH
>
> -#if ALLOC_SPLIT_PTLOCKS
> -struct pt_lock {
> - spinlock_t ptl;
> - struct rcu_head rcu;
> -};
> -#endif
> -
> /**
> * struct ptdesc - Memory descriptor for page tables.
> * @__page_flags: Same as page flags. Powerpc only.
> @@ -485,7 +478,7 @@ struct ptdesc {
> union {
> unsigned long _pt_pad_2;
> #if ALLOC_SPLIT_PTLOCKS
> - struct pt_lock *ptl;
> + spinlock_t *ptl;
> #else
> spinlock_t ptl;
> #endif
> diff --git a/mm/memory.c b/mm/memory.c
> index d9af83dd86bbf..83765632e20b0 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -7041,34 +7041,24 @@ static struct kmem_cache *page_ptl_cachep;
>
> void __init ptlock_cache_init(void)
> {
> - page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(struct pt_lock), 0,
> + page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
> SLAB_PANIC, NULL);
> }
>
> bool ptlock_alloc(struct ptdesc *ptdesc)
> {
> - struct pt_lock *pt_lock;
> + spinlock_t *ptl;
>
> - pt_lock = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
> - if (!pt_lock)
> + ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
> + if (!ptl)
> return false;
> - ptdesc->ptl = pt_lock;
> + ptdesc->ptl = ptl;
> return true;
> }
>
> -static void ptlock_free_rcu(struct rcu_head *head)
> -{
> - struct pt_lock *pt_lock;
> -
> - pt_lock = container_of(head, struct pt_lock, rcu);
> - kmem_cache_free(page_ptl_cachep, pt_lock);
> -}
> -
> void ptlock_free(struct ptdesc *ptdesc)
> {
> - struct pt_lock *pt_lock = ptdesc->ptl;
> -
> - call_rcu(&pt_lock->rcu, ptlock_free_rcu);
> + kmem_cache_free(page_ptl_cachep, ptdesc->ptl);
> }
> #endif
>
> --
> 2.20.1
>