[FIX] slub: Remove kmalloc under list_lock from list_slab_objects() V2
From: Christopher Lameter
Date: Mon Nov 11 2019 - 10:55:08 EST
Regardless of the issue with memcgs allowing allocations from their kmalloc arrays during shutdown: this patch cleans things up and properly allocates the bitmap outside of the list_lock.
[FIX] slub: Remove kmalloc under list_lock from list_slab_objects() V2
V1->V2 : Properly handle CONFIG_SLUB_DEBUG. Handle bitmap free correctly.
list_slab_objects() is called when a slab is destroyed while objects still remain in it, in order to list those objects in the syslog. This is a pretty rare event.
It turns out that we take the list_lock and then call kmalloc while holding that lock.
Perform the allocation in free_partial() before the list_lock is taken.
Fixes: bbd7d57bfe852d9788bae5fb171c7edb4021d8ac ("slub: Potential stack overflow")
Signed-off-by: Christoph Lameter
Index: linux/mm/slub.c
===================================================================
--- linux.orig/mm/slub.c 2019-10-15 13:54:57.032655296 +0000
+++ linux/mm/slub.c 2019-11-11 15:52:11.616397853 +0000
@@ -3690,14 +3690,15 @@ error:
}
static void list_slab_objects(struct kmem_cache *s, struct page *page,
- const char *text)
+ const char *text, unsigned long *map)
{
#ifdef CONFIG_SLUB_DEBUG
void *addr = page_address(page);
void *p;
- unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
+
if (!map)
return;
+
slab_err(s, page, text, s->name);
slab_lock(page);
@@ -3710,7 +3711,6 @@ static void list_slab_objects(struct kme
}
}
slab_unlock(page);
- bitmap_free(map);
#endif
}
@@ -3723,6 +3723,11 @@ static void free_partial(struct kmem_cac
{
LIST_HEAD(discard);
struct page *page, *h;
+ unsigned long *map = NULL;
+
+#ifdef CONFIG_SLUB_DEBUG
+ map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
+#endif
BUG_ON(irqs_disabled());
spin_lock_irq(&n->list_lock);
@@ -3732,11 +3737,16 @@ static void free_partial(struct kmem_cac
list_add(&page->slab_list, &discard);
} else {
list_slab_objects(s, page,
- "Objects remaining in %s on __kmem_cache_shutdown()");
+ "Objects remaining in %s on __kmem_cache_shutdown()",
+ map);
}
}
spin_unlock_irq(&n->list_lock);
+#ifdef CONFIG_SLUB_DEBUG
+ bitmap_free(map);
+#endif
+
list_for_each_entry_safe(page, h, &discard, slab_list)
discard_slab(s, page);
}