[PATCH v3 1/1] mm/slab: mark alloc tags empty for sheaves allocated with __GFP_NO_OBJ_EXT
From: Suren Baghdasaryan
Date: Wed Feb 25 2026 - 11:39:24 EST
alloc_empty_sheaf() allocates sheaves from SLAB_KMALLOC caches using
__GFP_NO_OBJ_EXT to avoid recursion; however, it does not mark their
allocation tags empty before freeing, which results in a warning when
CONFIG_MEM_ALLOC_PROFILING_DEBUG is set. Fix this by marking the
allocation tags for such sheaves as empty.
Reported-by: David Wang <00107082@xxxxxxx>
Closes: https://lore.kernel.org/all/20260223155128.3849-1-00107082@xxxxxxx/
Analyzed-by: Harry Yoo <harry.yoo@xxxxxxxxxx>
Signed-off-by: Suren Baghdasaryan <surenb@xxxxxxxxxx>
Reviewed-by: Harry Yoo <harry.yoo@xxxxxxxxxx>
Tested-by: Harry Yoo <harry.yoo@xxxxxxxxxx>
Tested-by: David Wang <00107082@xxxxxxx>
---
include/linux/gfp_types.h | 2 ++
mm/slab.h | 4 ++--
mm/slub.c | 33 +++++++++++++++++++++++----------
3 files changed, 27 insertions(+), 12 deletions(-)
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 814bb2892f99..6c75df30a281 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -139,6 +139,8 @@ enum {
* %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
*
* %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension.
+ * mark_obj_codetag_empty() should be called upon freeing for objects allocated
+ * with this flag to indicate that their NULL tags are expected and normal.
*/
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
diff --git a/mm/slab.h b/mm/slab.h
index 71c7261bf822..f6ef862b60ef 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -290,14 +290,14 @@ static inline void *nearest_obj(struct kmem_cache *cache,
/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
- void *addr, void *obj)
+ void *addr, const void *obj)
{
return reciprocal_divide(kasan_reset_tag(obj) - addr,
cache->reciprocal_size);
}
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
- const struct slab *slab, void *obj)
+ const struct slab *slab, const void *obj)
{
if (is_kfence_address(obj))
return 0;
diff --git a/mm/slub.c b/mm/slub.c
index 862642c165ed..34c32749f091 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2041,18 +2041,18 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
-static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
+static inline void mark_obj_codetag_empty(const void *obj)
{
- struct slab *obj_exts_slab;
+ struct slab *obj_slab;
unsigned long slab_exts;
- obj_exts_slab = virt_to_slab(obj_exts);
- slab_exts = slab_obj_exts(obj_exts_slab);
+ obj_slab = virt_to_slab(obj);
+ slab_exts = slab_obj_exts(obj_slab);
if (slab_exts) {
get_slab_obj_exts(slab_exts);
- unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
- obj_exts_slab, obj_exts);
- struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab,
+ unsigned int offs = obj_to_index(obj_slab->slab_cache,
+ obj_slab, obj);
+ struct slabobj_ext *ext = slab_obj_ext(obj_slab,
slab_exts, offs);
if (unlikely(is_codetag_empty(&ext->ref))) {
@@ -2090,7 +2090,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
-static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
+static inline void mark_obj_codetag_empty(const void *obj) {}
static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
struct slabobj_ext *vec, unsigned int objects) {}
@@ -2211,7 +2211,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
* assign slabobj_exts in parallel. In this case the existing
* objcg vector should be reused.
*/
- mark_objexts_empty(vec);
+ mark_obj_codetag_empty(vec);
if (unlikely(!allow_spin))
kfree_nolock(vec);
else
@@ -2254,7 +2254,7 @@ static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
* NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
* the extension for obj_exts is expected to be NULL.
*/
- mark_objexts_empty(obj_exts);
+ mark_obj_codetag_empty(obj_exts);
if (allow_spin)
kfree(obj_exts);
else
@@ -2312,6 +2312,10 @@ static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
#else /* CONFIG_SLAB_OBJ_EXT */
+static inline void mark_obj_codetag_empty(const void *obj)
+{
+}
+
static inline void init_slab_obj_exts(struct slab *slab)
{
}
@@ -2783,6 +2787,15 @@ static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s,
static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
{
+ /*
+ * If the sheaf was created with __GFP_NO_OBJ_EXT flag then its
+ * corresponding extension is NULL and alloc_tag_sub() will throw a
+ * warning, therefore replace NULL with CODETAG_EMPTY to indicate
+ * that the extension for this sheaf is expected to be NULL.
+ */
+ if (s->flags & SLAB_KMALLOC)
+ mark_obj_codetag_empty(sheaf);
+
kfree(sheaf);
stat(s, SHEAF_FREE);
base-commit: 7dff99b354601dd01829e1511711846e04340a69
--
2.53.0.414.gf7e9f6c205-goog