Re: [PATCH v3 05/15] mm/sl[au]b: factor out __do_kmalloc_node()
From: Vlastimil Babka
Date: Thu Jul 28 2022 - 10:45:13 EST
On 7/12/22 15:39, Hyeonggon Yoo wrote:
> __kmalloc(), __kmalloc_node(), and __kmalloc_node_track_caller()
> mostly do the same job. Factor out the common code into __do_kmalloc_node().
>
> Note that this patch also fixes the missing kasan_kmalloc() call in
> SLUB's __kmalloc_node_track_caller().
>
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>
Reviewed-by: Vlastimil Babka <vbabka@xxxxxxx>
> ---
> mm/slab.c | 30 +----------------------
> mm/slub.c | 71 +++++++++++++++----------------------------------------
> 2 files changed, 20 insertions(+), 81 deletions(-)
>
> diff --git a/mm/slab.c b/mm/slab.c
> index da2f6a5dd8fa..ab34727d61b2 100644
> --- a/mm/slab.c
> +++ b/mm/slab.c
> @@ -3631,37 +3631,9 @@ void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
> }
> #endif
>
> -/**
> - * __do_kmalloc - allocate memory
> - * @size: how many bytes of memory are required.
> - * @flags: the type of memory to allocate (see kmalloc).
> - * @caller: function caller for debug tracking of the caller
> - *
> - * Return: pointer to the allocated memory or %NULL in case of error
> - */
> -static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
> - unsigned long caller)
> -{
> - struct kmem_cache *cachep;
> - void *ret;
> -
> - if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
> - return NULL;
> - cachep = kmalloc_slab(size, flags);
> - if (unlikely(ZERO_OR_NULL_PTR(cachep)))
> - return cachep;
> - ret = slab_alloc(cachep, NULL, flags, size, caller);
> -
> - ret = kasan_kmalloc(cachep, ret, size, flags);
> - trace_kmalloc(caller, ret, cachep,
> - size, cachep->size, flags);
> -
> - return ret;
> -}
> -
> void *__kmalloc(size_t size, gfp_t flags)
> {
> - return __do_kmalloc(size, flags, _RET_IP_);
> + return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
> }
> EXPORT_SYMBOL(__kmalloc);
>
> diff --git a/mm/slub.c b/mm/slub.c
> index 7c284535a62b..2ccc473e0ae7 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -4402,29 +4402,6 @@ static int __init setup_slub_min_objects(char *str)
>
> __setup("slub_min_objects=", setup_slub_min_objects);
>
> -void *__kmalloc(size_t size, gfp_t flags)
> -{
> - struct kmem_cache *s;
> - void *ret;
> -
> - if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
> - return kmalloc_large(size, flags);
> -
> - s = kmalloc_slab(size, flags);
> -
> - if (unlikely(ZERO_OR_NULL_PTR(s)))
> - return s;
> -
> - ret = slab_alloc(s, NULL, flags, _RET_IP_, size);
> -
> - trace_kmalloc(_RET_IP_, ret, s, size, s->size, flags);
> -
> - ret = kasan_kmalloc(s, ret, size, flags);
> -
> - return ret;
> -}
> -EXPORT_SYMBOL(__kmalloc);
> -
> static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
> {
> struct page *page;
> @@ -4442,7 +4419,8 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
> return kmalloc_large_node_hook(ptr, size, flags);
> }
>
> -void *__kmalloc_node(size_t size, gfp_t flags, int node)
> +static __always_inline
> +void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
> {
> struct kmem_cache *s;
> void *ret;
> @@ -4450,7 +4428,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
> if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
> ret = kmalloc_large_node(size, flags, node);
>
> - trace_kmalloc_node(_RET_IP_, ret, NULL,
> + trace_kmalloc_node(caller, ret, NULL,
> size, PAGE_SIZE << get_order(size),
> flags, node);
>
> @@ -4462,16 +4440,28 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
> if (unlikely(ZERO_OR_NULL_PTR(s)))
> return s;
>
> - ret = slab_alloc_node(s, NULL, flags, node, _RET_IP_, size);
> + ret = slab_alloc_node(s, NULL, flags, node, caller, size);
>
> - trace_kmalloc_node(_RET_IP_, ret, s, size, s->size, flags, node);
> + trace_kmalloc_node(caller, ret, s, size, s->size, flags, node);
>
> ret = kasan_kmalloc(s, ret, size, flags);
>
> return ret;
> }
> +
> +void *__kmalloc_node(size_t size, gfp_t flags, int node)
> +{
> + return __do_kmalloc_node(size, flags, node, _RET_IP_);
> +}
> EXPORT_SYMBOL(__kmalloc_node);
>
> +void *__kmalloc(size_t size, gfp_t flags)
> +{
> + return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
> +}
> +EXPORT_SYMBOL(__kmalloc);
> +
> +
> #ifdef CONFIG_HARDENED_USERCOPY
> /*
> * Rejects incorrectly sized objects and objects that are to be copied
> @@ -4905,32 +4895,9 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
> }
>
> void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
> - int node, unsigned long caller)
> + int node, unsigned long caller)
> {
> - struct kmem_cache *s;
> - void *ret;
> -
> - if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
> - ret = kmalloc_large_node(size, gfpflags, node);
> -
> - trace_kmalloc_node(caller, ret, NULL,
> - size, PAGE_SIZE << get_order(size),
> - gfpflags, node);
> -
> - return ret;
> - }
> -
> - s = kmalloc_slab(size, gfpflags);
> -
> - if (unlikely(ZERO_OR_NULL_PTR(s)))
> - return s;
> -
> - ret = slab_alloc_node(s, NULL, gfpflags, node, caller, size);
> -
> - /* Honor the call site pointer we received. */
> - trace_kmalloc_node(caller, ret, s, size, s->size, gfpflags, node);
> -
> - return ret;
> + return __do_kmalloc_node(size, gfpflags, node, caller);
> }
> EXPORT_SYMBOL(__kmalloc_node_track_caller);
>