Re: [PATCH v2 1/1] mm/slab: mark alloc tags empty for sheafs allocated with __GFP_NO_OBJ_EXT

From: David Wang

Date: Tue Feb 24 2026 - 21:52:12 EST


At 2026-02-25 06:11:32, "Suren Baghdasaryan" <surenb@xxxxxxxxxx> wrote:
>alloc_empty_sheaf() allocates sheafs from SLAB_KMALLOC caches using
>__GFP_NO_OBJ_EXT to avoid recursion, however it does not mark their
>allocation tags empty before freeing, which results in a warning when
>CONFIG_MEM_ALLOC_PROFILING_DEBUG is set. Fix this by marking allocation
>tags for such allocations as empty.
>
>Reported-by: David Wang <00107082@xxxxxxx>
>Closes: https://lore.kernel.org/all/20260223155128.3849-1-00107082@xxxxxxx/
>Analyzed-by: Harry Yoo <harry.yoo@xxxxxxxxxx>
>Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
>---
> mm/slab.h | 4 ++--
> mm/slub.c | 23 +++++++++++++++--------
> 2 files changed, 17 insertions(+), 10 deletions(-)
>
>diff --git a/mm/slab.h b/mm/slab.h
>index 71c7261bf822..f6ef862b60ef 100644
>--- a/mm/slab.h
>+++ b/mm/slab.h
>@@ -290,14 +290,14 @@ static inline void *nearest_obj(struct kmem_cache *cache,
>
> /* Determine object index from a given position */
> static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
>- void *addr, void *obj)
>+ void *addr, const void *obj)
> {
> return reciprocal_divide(kasan_reset_tag(obj) - addr,
> cache->reciprocal_size);
> }
>
> static inline unsigned int obj_to_index(const struct kmem_cache *cache,
>- const struct slab *slab, void *obj)
>+ const struct slab *slab, const void *obj)
> {
> if (is_kfence_address(obj))
> return 0;
>diff --git a/mm/slub.c b/mm/slub.c
>index 862642c165ed..5c76e55eee7d 100644
>--- a/mm/slub.c
>+++ b/mm/slub.c
>@@ -2041,18 +2041,18 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
>
> #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
>
>-static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
>+static inline void mark_objexts_empty(const void *obj)
> {
>- struct slab *obj_exts_slab;
>+ struct slab *obj_slab;
> unsigned long slab_exts;
>
>- obj_exts_slab = virt_to_slab(obj_exts);
>- slab_exts = slab_obj_exts(obj_exts_slab);
>+ obj_slab = virt_to_slab(obj);
>+ slab_exts = slab_obj_exts(obj_slab);
> if (slab_exts) {
> get_slab_obj_exts(slab_exts);
>- unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
>- obj_exts_slab, obj_exts);
>- struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
>+ unsigned int offs = obj_to_index(obj_slab->slab_cache,
>+ obj_slab, obj);
>+ struct slabobj_ext *ext = slab_obj_ext(obj_slab,
> slab_exts, offs);
>
> if (unlikely(is_codetag_empty(&ext->ref))) {
>@@ -2090,7 +2090,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
>
> #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
>
>-static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
>+static inline void mark_objexts_empty(const void *obj) {}
> static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
> static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
> struct slabobj_ext *vec, unsigned int objects) {}
>@@ -2312,6 +2312,10 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
>
> #else /* CONFIG_SLAB_OBJ_EXT */
>
>+static inline void mark_objexts_empty(const void *obj)
>+{
>+}
>+
> static inline void init_slab_obj_exts(struct slab *slab)
> {
> }
>@@ -2783,6 +2787,9 @@ static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
>
> static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
> {
>+ if (s->flags & SLAB_KMALLOC)
>+ mark_objexts_empty(sheaf);
>+
> kfree(sheaf);
>
> stat(s, SHEAF_FREE);
>
>base-commit: 7dff99b354601dd01829e1511711846e04340a69
>--
>2.53.0.414.gf7e9f6c205-goog

Tested-by: David Wang <00107082@xxxxxxx>


David