[RFC PATCH 6/7] mm/slub: make slab data more observable

From: Ruipeng Qi
Date: Thu Dec 21 2023 - 08:50:52 EST


From: qiruipeng <qiruipeng@xxxxxxxxxxx>

Osdump is interested in data stored in the slab subsystem. Add the
full list back into struct kmem_cache_node and maintain it from the
corresponding functions, instead of enabling SLUB_DEBUG outright,
which would introduce noticeable overhead.

Signed-off-by: qiruipeng <qiruipeng@xxxxxxxxxxx>
---
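Notes: once n->full is maintained under CONFIG_OS_MINIDUMP, a dump-side
walker can enumerate every list-resident slab of a cache by holding
n->list_lock and walking n->partial plus n->full, roughly as in the
sketch below. This is only an illustration of the intended consumer and
is not part of the series; osdump_for_each_slab() is a placeholder
name. Per-CPU slabs (the active slab and the percpu partial list) are
on no node list and would still need separate handling.

/*
 * Sketch of the intended dump-side consumer; osdump_for_each_slab()
 * is a placeholder name, not part of this series.  Assumes it is
 * built in mm/, next to slub.c, so it can see mm/slab.h.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "slab.h"

static void osdump_for_each_slab(struct kmem_cache *s,
				 void (*fn)(struct slab *slab))
{
	struct kmem_cache_node *n;
	struct slab *slab;
	unsigned long flags;
	int node;

	for_each_kmem_cache_node(s, node, n) {
		spin_lock_irqsave(&n->list_lock, flags);
		list_for_each_entry(slab, &n->partial, slab_list)
			fn(slab);
#ifdef CONFIG_OS_MINIDUMP
		/* Only populated with this patch applied. */
		list_for_each_entry(slab, &n->full, slab_list)
			fn(slab);
#endif
		spin_unlock_irqrestore(&n->list_lock, flags);
	}
}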
mm/slab.h | 2 ++
mm/slub.c | 38 +++++++++++++++++++++++++++++++++++++-
2 files changed, 39 insertions(+), 1 deletion(-)
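
The deactivate_slab() changes follow a single rule: with
CONFIG_OS_MINIDUMP enabled, every mode that touches a node list holds
n->list_lock across the freelist cmpxchg. The lock state of the three
reachable modes at the retry point becomes:

	M_FREE		list_lock not held (slab is about to be discarded)
	M_PARTIAL	list_lock held (slab moves to n->partial)
	M_FULL_NOLIST	list_lock held (slab now moves to n->full)

which is why the failure path can unlock on "mode != M_FREE" rather
than "mode == M_PARTIAL", and why the M_FULL_NOLIST branch drops the
lock only after add_full().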

diff --git a/mm/slab.h b/mm/slab.h
index 3d07fb428393..a42a54c9c5de 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -799,6 +799,8 @@ struct kmem_cache_node {
 	atomic_long_t nr_slabs;
 	atomic_long_t total_objects;
 	struct list_head full;
+#elif defined(CONFIG_OS_MINIDUMP)
+	struct list_head full;
 #endif
 #endif

diff --git a/mm/slub.c b/mm/slub.c
index 63d281dfacdb..1a496ec945b6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1730,10 +1730,26 @@ static inline int check_object(struct kmem_cache *s, struct slab *slab,
 static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
 static inline void set_track(struct kmem_cache *s, void *object,
 			enum track_item alloc, unsigned long addr) {}
+#ifndef CONFIG_OS_MINIDUMP
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct slab *slab) {}
 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct slab *slab) {}
+#else
+static inline void add_full(struct kmem_cache *s,
+			struct kmem_cache_node *n, struct slab *slab)
+{
+	lockdep_assert_held(&n->list_lock);
+	list_add(&slab->slab_list, &n->full);
+}
+
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
+{
+	lockdep_assert_held(&n->list_lock);
+	list_del(&slab->slab_list);
+}
+#endif
+
 slab_flags_t kmem_cache_flags(unsigned int object_size,
 	slab_flags_t flags, const char *name)
 {
@@ -2570,6 +2586,14 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 		spin_lock_irqsave(&n->list_lock, flags);
 	} else {
 		mode = M_FULL_NOLIST;
+#ifdef CONFIG_OS_MINIDUMP
+		/*
+		 * Taking the spinlock removes the possibility that
+		 * acquire_slab() will see a slab that is frozen
+		 */
+		spin_lock_irqsave(&n->list_lock, flags);
+
+#endif
 	}
 
 
@@ -2577,7 +2601,11 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 				old.freelist, old.counters,
 				new.freelist, new.counters,
 				"unfreezing slab")) {
+#ifndef CONFIG_OS_MINIDUMP
 		if (mode == M_PARTIAL)
+#else
+		if (mode != M_FREE)
+#endif
 			spin_unlock_irqrestore(&n->list_lock, flags);
 		goto redo;
 	}
@@ -2592,6 +2620,10 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 		discard_slab(s, slab);
 		stat(s, FREE_SLAB);
 	} else if (mode == M_FULL_NOLIST) {
+#ifdef CONFIG_OS_MINIDUMP
+		add_full(s, n, slab);
+		spin_unlock_irqrestore(&n->list_lock, flags);
+#endif
 		stat(s, DEACTIVATE_FULL);
 	}
 }
@@ -4202,6 +4234,9 @@ init_kmem_cache_node(struct kmem_cache_node *n)
 	atomic_long_set(&n->nr_slabs, 0);
 	atomic_long_set(&n->total_objects, 0);
 	INIT_LIST_HEAD(&n->full);
+#elif defined(CONFIG_OS_MINIDUMP)
+	INIT_LIST_HEAD(&n->full);
+
 #endif
 }

@@ -5009,7 +5044,8 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
 		list_for_each_entry(p, &n->partial, slab_list)
 			p->slab_cache = s;
 
-#ifdef CONFIG_SLUB_DEBUG
+#if defined(CONFIG_SLUB_DEBUG) || \
+	defined(CONFIG_OS_MINIDUMP)
 		list_for_each_entry(p, &n->full, slab_list)
 			p->slab_cache = s;
 #endif
--
2.17.1