Re: [PATCH v2 11/21] mm/slab: move the rest of slub_def.h to mm/slab.h

From: Hyeonggon Yoo
Date: Wed Dec 06 2023 - 04:45:33 EST


On Mon, Nov 20, 2023 at 07:34:22PM +0100, Vlastimil Babka wrote:
> mm/slab.h is the only place that includes include/linux/slub_def.h, an
> arrangement that allowed switching between SLAB and SLUB. Now we can
> simply move the contents over and remove slub_def.h.
>
> Use this opportunity to fix up some whitespace (alignment) issues.
>
> Reviewed-by: Kees Cook <keescook@xxxxxxxxxxxx>
> Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
> ---
> include/linux/slub_def.h | 150 -----------------------------------------------
> mm/slab.h                | 138 ++++++++++++++++++++++++++++++++++++++++++-
> 2 files changed, 137 insertions(+), 151 deletions(-)
>
> diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
> deleted file mode 100644
> index a0229ea42977..000000000000
> --- a/include/linux/slub_def.h
> +++ /dev/null
> @@ -1,150 +0,0 @@
> -/* SPDX-License-Identifier: GPL-2.0 */
> -#ifndef _LINUX_SLUB_DEF_H
> -#define _LINUX_SLUB_DEF_H
> -
> -/*
> - * SLUB : A Slab allocator without object queues.
> - *
> - * (C) 2007 SGI, Christoph Lameter
> - */
> -#include <linux/kfence.h>
> -#include <linux/kobject.h>
> -#include <linux/reciprocal_div.h>
> -#include <linux/local_lock.h>
> -
> -#ifdef CONFIG_SLUB_CPU_PARTIAL
> -#define slub_percpu_partial(c) ((c)->partial)
> -
> -#define slub_set_percpu_partial(c, p)           \
> -({                                              \
> -        slub_percpu_partial(c) = (p)->next;     \
> -})
> -
> -#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c))
> -#else
> -#define slub_percpu_partial(c) NULL
> -
> -#define slub_set_percpu_partial(c, p)
> -
> -#define slub_percpu_partial_read_once(c) NULL
> -#endif // CONFIG_SLUB_CPU_PARTIAL
> -
> -/*
> - * Word size structure that can be atomically updated or read and that
> - * contains both the order and the number of objects that a slab of the
> - * given order would contain.
> - */
> -struct kmem_cache_order_objects {
> -        unsigned int x;
> -};
> -
> -/*
> - * Slab cache management.
> - */
> -struct kmem_cache {
> -#ifndef CONFIG_SLUB_TINY
> -        struct kmem_cache_cpu __percpu *cpu_slab;
> -#endif
> -        /* Used for retrieving partial slabs, etc. */
> -        slab_flags_t flags;
> -        unsigned long min_partial;
> -        unsigned int size;        /* The size of an object including metadata */
> -        unsigned int object_size;/* The size of an object without metadata */
> -        struct reciprocal_value reciprocal_size;
> -        unsigned int offset;        /* Free pointer offset */
> -#ifdef CONFIG_SLUB_CPU_PARTIAL
> -        /* Number of per cpu partial objects to keep around */
> -        unsigned int cpu_partial;
> -        /* Number of per cpu partial slabs to keep around */
> -        unsigned int cpu_partial_slabs;
> -#endif
> -        struct kmem_cache_order_objects oo;
> -
> -        /* Allocation and freeing of slabs */
> -        struct kmem_cache_order_objects min;
> -        gfp_t allocflags;        /* gfp flags to use on each alloc */
> -        int refcount;                /* Refcount for slab cache destroy */
> -        void (*ctor)(void *);
> -        unsigned int inuse;                /* Offset to metadata */
> -        unsigned int align;                /* Alignment */
> -        unsigned int red_left_pad;        /* Left redzone padding size */
> -        const char *name;        /* Name (only for display!) */
> -        struct list_head list;        /* List of slab caches */
> -#ifdef CONFIG_SYSFS
> -        struct kobject kobj;        /* For sysfs */
> -#endif
> -#ifdef CONFIG_SLAB_FREELIST_HARDENED
> -        unsigned long random;
> -#endif
> -
> -#ifdef CONFIG_NUMA
> -        /*
> -         * Defragmentation by allocating from a remote node.
> -         */
> -        unsigned int remote_node_defrag_ratio;
> -#endif
> -
> -#ifdef CONFIG_SLAB_FREELIST_RANDOM
> -        unsigned int *random_seq;
> -#endif
> -
> -#ifdef CONFIG_KASAN_GENERIC
> -        struct kasan_cache kasan_info;
> -#endif
> -
> -#ifdef CONFIG_HARDENED_USERCOPY
> -        unsigned int useroffset;        /* Usercopy region offset */
> -        unsigned int usersize;        /* Usercopy region size */
> -#endif
> -
> -        struct kmem_cache_node *node[MAX_NUMNODES];
> -};
> -
> -#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
> -#define SLAB_SUPPORTS_SYSFS
> -void sysfs_slab_unlink(struct kmem_cache *);
> -void sysfs_slab_release(struct kmem_cache *);
> -#else
> -static inline void sysfs_slab_unlink(struct kmem_cache *s)
> -{
> -}
> -static inline void sysfs_slab_release(struct kmem_cache *s)
> -{
> -}
> -#endif
> -
> -void *fixup_red_left(struct kmem_cache *s, void *p);
> -
> -static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
> -                                void *x) {
> -        void *object = x - (x - slab_address(slab)) % cache->size;
> -        void *last_object = slab_address(slab) +
> -                (slab->objects - 1) * cache->size;
> -        void *result = (unlikely(object > last_object)) ? last_object : object;
> -
> -        result = fixup_red_left(cache, result);
> -        return result;
> -}
> -
> -/* Determine object index from a given position */
> -static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
> -                                          void *addr, void *obj)
> -{
> -        return reciprocal_divide(kasan_reset_tag(obj) - addr,
> -                                 cache->reciprocal_size);
> -}
> -
> -static inline unsigned int obj_to_index(const struct kmem_cache *cache,
> -                                        const struct slab *slab, void *obj)
> -{
> -        if (is_kfence_address(obj))
> -                return 0;
> -        return __obj_to_index(cache, slab_address(slab), obj);
> -}
> -
> -static inline int objs_per_slab(const struct kmem_cache *cache,
> -                                const struct slab *slab)
> -{
> -        return slab->objects;
> -}
> -#endif /* _LINUX_SLUB_DEF_H */
> diff --git a/mm/slab.h b/mm/slab.h
> index 014c36ea51fa..3a8d13c099fa 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -209,7 +209,143 @@ static inline size_t slab_size(const struct slab *slab)
>          return PAGE_SIZE << slab_order(slab);
>  }
>
> -#include <linux/slub_def.h>
> +#include <linux/kfence.h>
> +#include <linux/kobject.h>
> +#include <linux/reciprocal_div.h>
> +#include <linux/local_lock.h>
> +
> +#ifdef CONFIG_SLUB_CPU_PARTIAL
> +#define slub_percpu_partial(c) ((c)->partial)
> +
> +#define slub_set_percpu_partial(c, p)           \
> +({                                              \
> +        slub_percpu_partial(c) = (p)->next;     \
> +})
> +
> +#define slub_percpu_partial_read_once(c) READ_ONCE(slub_percpu_partial(c))
> +#else
> +#define slub_percpu_partial(c) NULL
> +
> +#define slub_set_percpu_partial(c, p)
> +
> +#define slub_percpu_partial_read_once(c) NULL
> +#endif // CONFIG_SLUB_CPU_PARTIAL
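
(Not part of the patch, just a note for anyone reading along: these
accessors hide the CONFIG_SLUB_CPU_PARTIAL dependency from callers, so
the allocator code needs no #ifdefs. A rough sketch of how they tend to
be used; the example_* helpers are purely illustrative, the real users
live in mm/slub.c:)

/* Illustrative only -- not part of this patch. */
static bool example_has_cpu_partial(struct kmem_cache_cpu *c)
{
        /* Reads NULL when CONFIG_SLUB_CPU_PARTIAL=n, so no #ifdef needed. */
        return slub_percpu_partial_read_once(c) != NULL;
}

static void example_pop_cpu_partial(struct kmem_cache_cpu *c, struct slab *slab)
{
        /* Detach "slab" by pointing the per-cpu partial list at its successor. */
        slub_set_percpu_partial(c, slab);
}
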
> +
> +/*
> + * Word size structure that can be atomically updated or read and that
> + * contains both the order and the number of objects that a slab of the
> + * given order would contain.
> + */
> +struct kmem_cache_order_objects {
> +        unsigned int x;
> +};
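
(Side note for readers: the single word packs the slab page order in
the high bits and the object count in the low bits. A rough sketch of
the packing, following the OO_SHIFT/OO_MASK convention of the helpers
in mm/slub.c; this oo_make() variant takes the object count directly,
which is a simplification:)

#define OO_SHIFT        16
#define OO_MASK         ((1 << OO_SHIFT) - 1)

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
                                                      unsigned int objects)
{
        struct kmem_cache_order_objects x = { (order << OO_SHIFT) + objects };
        return x;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
        return x.x >> OO_SHIFT;         /* slab page order */
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
        return x.x & OO_MASK;           /* objects per slab of that order */
}
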
> +
> +/*
> + * Slab cache management.
> + */
> +struct kmem_cache {
> +#ifndef CONFIG_SLUB_TINY
> +        struct kmem_cache_cpu __percpu *cpu_slab;
> +#endif
> +        /* Used for retrieving partial slabs, etc. */
> +        slab_flags_t flags;
> +        unsigned long min_partial;
> +        unsigned int size;              /* Object size including metadata */
> +        unsigned int object_size;       /* Object size without metadata */
> +        struct reciprocal_value reciprocal_size;
> +        unsigned int offset;            /* Free pointer offset */
> +#ifdef CONFIG_SLUB_CPU_PARTIAL
> +        /* Number of per cpu partial objects to keep around */
> +        unsigned int cpu_partial;
> +        /* Number of per cpu partial slabs to keep around */
> +        unsigned int cpu_partial_slabs;
> +#endif
> +        struct kmem_cache_order_objects oo;
> +
> +        /* Allocation and freeing of slabs */
> +        struct kmem_cache_order_objects min;
> +        gfp_t allocflags;               /* gfp flags to use on each alloc */
> +        int refcount;                   /* Refcount for slab cache destroy */
> +        void (*ctor)(void *object);     /* Object constructor */
> +        unsigned int inuse;             /* Offset to metadata */
> +        unsigned int align;             /* Alignment */
> +        unsigned int red_left_pad;      /* Left redzone padding size */
> +        const char *name;               /* Name (only for display!) */
> +        struct list_head list;          /* List of slab caches */
> +#ifdef CONFIG_SYSFS
> +        struct kobject kobj;            /* For sysfs */
> +#endif
> +#ifdef CONFIG_SLAB_FREELIST_HARDENED
> +        unsigned long random;
> +#endif
> +
> +#ifdef CONFIG_NUMA
> +        /*
> +         * Defragmentation by allocating from a remote node.
> +         */
> +        unsigned int remote_node_defrag_ratio;
> +#endif
> +
> +#ifdef CONFIG_SLAB_FREELIST_RANDOM
> +        unsigned int *random_seq;
> +#endif
> +
> +#ifdef CONFIG_KASAN_GENERIC
> +        struct kasan_cache kasan_info;
> +#endif
> +
> +#ifdef CONFIG_HARDENED_USERCOPY
> +        unsigned int useroffset;        /* Usercopy region offset */
> +        unsigned int usersize;          /* Usercopy region size */
> +#endif
> +
> +        struct kmem_cache_node *node[MAX_NUMNODES];
> +};
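
(Another aside: "offset" above is where SLUB stashes the freelist
pointer inside a free object. A minimal sketch of what that means,
ignoring CONFIG_SLAB_FREELIST_HARDENED and KASAN tagging; the
example_* names are only for illustration:)

/* Illustrative only; the hardened/real versions in mm/slub.c do more. */
static inline void *example_get_freepointer(struct kmem_cache *s, void *object)
{
        return *(void **)(object + s->offset);
}

static inline void example_set_freepointer(struct kmem_cache *s, void *object,
                                           void *fp)
{
        *(void **)(object + s->offset) = fp;
}
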
> +
> +#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
> +#define SLAB_SUPPORTS_SYSFS
> +void sysfs_slab_unlink(struct kmem_cache *s);
> +void sysfs_slab_release(struct kmem_cache *s);
> +#else
> +static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
> +static inline void sysfs_slab_release(struct kmem_cache *s) { }
> +#endif
> +
> +void *fixup_red_left(struct kmem_cache *s, void *p);
> +
> +static inline void *nearest_obj(struct kmem_cache *cache,
> +                                const struct slab *slab, void *x)
> +{
> +        void *object = x - (x - slab_address(slab)) % cache->size;
> +        void *last_object = slab_address(slab) +
> +                (slab->objects - 1) * cache->size;
> +        void *result = (unlikely(object > last_object)) ? last_object : object;
> +
> +        result = fixup_red_left(cache, result);
> +        return result;
> +}
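
(For readers: nearest_obj() maps an arbitrary address inside a slab
back to the start of the object containing it, clamped to the last
object and corrected for the left red zone. A toy illustration of the
arithmetic, assuming cache->size == 64 and no red zoning; the
example_ helper is hypothetical:)

/* Illustration only: offset 100 falls inside object #1, which starts at 64. */
static void *example_enclosing_object(struct kmem_cache *cache,
                                      const struct slab *slab)
{
        void *addr = slab_address(slab) + 100;

        return nearest_obj(cache, slab, addr);  /* == slab_address(slab) + 64 */
}
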
> +
> +/* Determine object index from a given position */
> +static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
> +                                          void *addr, void *obj)
> +{
> +        return reciprocal_divide(kasan_reset_tag(obj) - addr,
> +                                 cache->reciprocal_size);
> +}
> +
> +static inline unsigned int obj_to_index(const struct kmem_cache *cache,
> +                                        const struct slab *slab, void *obj)
> +{
> +        if (is_kfence_address(obj))
> +                return 0;
> +        return __obj_to_index(cache, slab_address(slab), obj);
> +}
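
(One more note on the reciprocal divide: reciprocal_size is
precomputed once at cache creation so the index calculation above can
avoid an integer division; reciprocal_divide(x, reciprocal_value(d))
gives the same result as x / d. Roughly, with illustrative example_
helpers:)

#include <linux/reciprocal_div.h>

/* Illustrative only: done once when the cache is set up. */
static void example_init_reciprocal(struct kmem_cache *s)
{
        s->reciprocal_size = reciprocal_value(s->size);
}

static unsigned int example_obj_index(struct kmem_cache *s, void *base, void *obj)
{
        /* Same result as (obj - base) / s->size, without a division. */
        return reciprocal_divide(obj - base, s->reciprocal_size);
}
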
> +
> +static inline int objs_per_slab(const struct kmem_cache *cache,
> +                                const struct slab *slab)
> +{
> +        return slab->objects;
> +}
>
> #include <linux/memcontrol.h>
> #include <linux/fault-inject.h>

Looks good to me,
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@xxxxxxxxx>

>
> --
> 2.42.1
>
>