[PATCH] mm, slub: fix memory and cpu hotplug related lock ordering issues
From: Vlastimil Babka
Date: Tue Aug 10 2021 - 04:58:07 EST
Qian Cai reported [1] a lockdep splat on memory offline.
[ 91.374541] WARNING: possible circular locking dependency detected
[ 91.381411] 5.14.0-rc5-next-20210809+ #84 Not tainted
[ 91.387149] ------------------------------------------------------
[ 91.394016] lsbug/1523 is trying to acquire lock:
[ 91.399406] ffff800018e76530 (flush_lock){+.+.}-{3:3}, at: flush_all+0x50/0x1c8
[ 91.407425] but task is already holding lock:
[ 91.414638] ffff800018e48468 (slab_mutex){+.+.}-{3:3}, at: slab_memory_callback+0x44/0x280
[ 91.423603] which lock already depends on the new lock.
To fix it, we need to change the order in flush_all() so that cpus_read_lock()
is first and mutex_lock(&flush_lock) second.
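The resulting nesting in the flush path (a recap of the diff below, comments added for illustration) is:

    cpus_read_lock();               /* taken first now */
    mutex_lock(&flush_lock);
    /* ... flush per-cpu slabs via workqueue ... */
    mutex_unlock(&flush_lock);
    cpus_read_unlock();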
Also, when called from slab_mem_going_offline_callback() we are already under
cpus_read_lock() and cannot take it again, so create a flush_all_cpus_locked()
variant and decouple flushing from the actual shrinking for this call path.
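In short (recapping the diff below), flush_all() becomes a wrapper that takes
cpus_read_lock() around flush_all_cpus_locked(), shrinking is split into
__kmem_cache_shrink() and __kmem_cache_do_shrink(), and the offline callback
calls the locked variants directly:

    static void flush_all(struct kmem_cache *s)
    {
            cpus_read_lock();
            flush_all_cpus_locked(s);       /* takes flush_lock inside */
            cpus_read_unlock();
    }

    int __kmem_cache_shrink(struct kmem_cache *s)
    {
            flush_all(s);
            return __kmem_cache_do_shrink(s);
    }

    /* slab_mem_going_offline_callback(), cpus_read_lock() already held: */
    list_for_each_entry(s, &slab_caches, list) {
            flush_all_cpus_locked(s);
            __kmem_cache_do_shrink(s);
    }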
Additionally, Mike Galbraith reported [2] a wrong order of cpus_read_lock() and
slab_mutex in the kmem_cache_destroy() path and proposed a fix to reverse it.
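With that reversed, kmem_cache_destroy() takes cpus_read_lock() before
slab_mutex, and __kmem_cache_shutdown() can rely on the caller holding
cpus_read_lock() and use flush_all_cpus_locked() (recap of the diff below):

    void kmem_cache_destroy(struct kmem_cache *s)
    {
            /* ... */
            cpus_read_lock();               /* now taken before slab_mutex */
            mutex_lock(&slab_mutex);
            /* ... __kmem_cache_shutdown() -> flush_all_cpus_locked() ... */
            mutex_unlock(&slab_mutex);
            cpus_read_unlock();
    }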
This patch is a fixup for the mmotm patch
mm-slub-move-flush_cpu_slab-invocations-__free_slab-invocations-out-of-irq-context.patch
[1] https://lore.kernel.org/lkml/0b36128c-3e12-77df-85fe-a153a714569b@xxxxxxxxxxx/
[2] https://lore.kernel.org/lkml/2eb3cf340716c40f03a0a342ab40219b3d1de195.camel@xxxxxx/
Reported-by: Qian Cai <quic_qiancai@xxxxxxxxxxx>
Reported-by: Mike Galbraith <efault@xxxxxx>
Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
---
 mm/slab_common.c |  2 ++
 mm/slub.c        | 29 +++++++++++++++++++++--------
 2 files changed, 23 insertions(+), 8 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 1c673c323baf..ec2bb0beed75 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -502,6 +502,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
         if (unlikely(!s))
                 return;
 
+        cpus_read_lock();
         mutex_lock(&slab_mutex);
 
         s->refcount--;
@@ -516,6 +517,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
         }
 out_unlock:
         mutex_unlock(&slab_mutex);
+        cpus_read_unlock();
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
diff --git a/mm/slub.c b/mm/slub.c
index da48ada3d17f..152487f84025 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2551,13 +2551,13 @@ static bool has_cpu_slab(int cpu, struct kmem_cache *s)
 static DEFINE_MUTEX(flush_lock);
 static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
 
-static void flush_all(struct kmem_cache *s)
+static void flush_all_cpus_locked(struct kmem_cache *s)
 {
         struct slub_flush_work *sfw;
         unsigned int cpu;
 
+        lockdep_assert_cpus_held();
         mutex_lock(&flush_lock);
-        cpus_read_lock();
 
         for_each_online_cpu(cpu) {
                 sfw = &per_cpu(slub_flush, cpu);
@@ -2578,10 +2578,16 @@ static void flush_all(struct kmem_cache *s)
                 flush_work(&sfw->work);
         }
 
-        cpus_read_unlock();
         mutex_unlock(&flush_lock);
 }
 
+static void flush_all(struct kmem_cache *s)
+{
+        cpus_read_lock();
+        flush_all_cpus_locked(s);
+        cpus_read_unlock();
+}
+
 /*
  * Use the cpu notifier to insure that the cpu slabs are flushed when
  * necessary.
@@ -4111,7 +4117,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
         int node;
         struct kmem_cache_node *n;
 
-        flush_all(s);
+        flush_all_cpus_locked(s);
         /* Attempt to free all objects */
         for_each_kmem_cache_node(s, node, n) {
                 free_partial(s, n);
@@ -4387,7 +4393,7 @@ EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s)
+int __kmem_cache_do_shrink(struct kmem_cache *s)
 {
         int node;
         int i;
@@ -4399,7 +4405,6 @@ int __kmem_cache_shrink(struct kmem_cache *s)
         unsigned long flags;
         int ret = 0;
 
-        flush_all(s);
         for_each_kmem_cache_node(s, node, n) {
                 INIT_LIST_HEAD(&discard);
                 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
@@ -4449,13 +4454,21 @@ int __kmem_cache_shrink(struct kmem_cache *s)
         return ret;
 }
 
+int __kmem_cache_shrink(struct kmem_cache *s)
+{
+        flush_all(s);
+        return __kmem_cache_do_shrink(s);
+}
+
 static int slab_mem_going_offline_callback(void *arg)
 {
         struct kmem_cache *s;
 
         mutex_lock(&slab_mutex);
-        list_for_each_entry(s, &slab_caches, list)
-                __kmem_cache_shrink(s);
+        list_for_each_entry(s, &slab_caches, list) {
+                flush_all_cpus_locked(s);
+                __kmem_cache_do_shrink(s);
+        }
         mutex_unlock(&slab_mutex);
 
         return 0;
--
2.32.0