[PATCH] mm: kmemleak: Add an emergency allocation pool for kmemleak objects
From: Catalin Marinas
Date: Wed Mar 27 2019 - 13:20:57 EST
This patch adds an emergency pool for struct kmemleak_object in case the
normal kmem_cache_alloc() fails under the gfp constraints passed by the
slab allocation caller. The patch also removes __GFP_NOFAIL which does
not play well with other gfp flags (introduced by commit d9570ee3bd1d,
"kmemleak: allow to coexist with fault injection").
Suggested-by: Michal Hocko <mhocko@xxxxxxxxxx>
Signed-off-by: Catalin Marinas <catalin.marinas@xxxxxxx>
---
mm/kmemleak.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 57 insertions(+), 2 deletions(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 6c318f5ac234..366a680cff7c 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -127,7 +127,7 @@
/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
__GFP_NORETRY | __GFP_NOMEMALLOC | \
- __GFP_NOWARN | __GFP_NOFAIL)
+ __GFP_NOWARN)
/* scanning area inside a memory block */
struct kmemleak_scan_area {
@@ -191,11 +191,16 @@ struct kmemleak_object {
#define HEX_ASCII 1
/* max number of lines to be printed */
#define HEX_MAX_LINES 2
+/* minimum emergency pool size */
+#define EMERGENCY_POOL_SIZE (NR_CPUS * 4)
/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
+/* emergency pool allocation */
+static LIST_HEAD(emergency_list);
+static int emergency_pool_size;
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
@@ -467,6 +472,43 @@ static int get_object(struct kmemleak_object *object)
return atomic_inc_not_zero(&object->use_count);
}
+/*
+ * Pop one preallocated object off the emergency pool (NULL if empty).
+ * Takes kmemleak_lock internally, so the caller must not hold it.
+ */
+static struct kmemleak_object *emergency_alloc(void)
+{
+ unsigned long flags;
+ struct kmemleak_object *object;
+
+ write_lock_irqsave(&kmemleak_lock, flags);
+ object = list_first_entry_or_null(&emergency_list, typeof(*object), object_list);
+ if (object) {
+ list_del(&object->object_list);
+ emergency_pool_size--;
+ }
+ write_unlock_irqrestore(&kmemleak_lock, flags);
+
+ return object;
+}
+
+/*
+ * Try to return the object to the emergency pool. Returns true if it was
+ * added, false if the pool is full (caller frees to the slab cache instead).
+ */
+static bool emergency_free(struct kmemleak_object *object)
+{
+ unsigned long flags;
+ bool freed = false;
+
+ /* check the pool size under the lock to avoid a check-then-act race */
+ write_lock_irqsave(&kmemleak_lock, flags);
+ if (emergency_pool_size < EMERGENCY_POOL_SIZE) {
+ list_add(&object->object_list, &emergency_list);
+ emergency_pool_size++;
+ freed = true;
+ }
+ write_unlock_irqrestore(&kmemleak_lock, flags);
+ return freed;
+}
+
/*
* RCU callback to free a kmemleak_object.
*/
@@ -485,7 +527,8 @@ static void free_object_rcu(struct rcu_head *rcu)
hlist_del(&area->node);
kmem_cache_free(scan_area_cache, area);
}
- kmem_cache_free(object_cache, object);
+ if (!emergency_free(object))
+ kmem_cache_free(object_cache, object);
}
/*
@@ -577,6 +620,8 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
unsigned long untagged_ptr;
object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
+ if (!object)
+ object = emergency_alloc();
if (!object) {
pr_warn("Cannot allocate a kmemleak_object structure\n");
kmemleak_disable();
@@ -2127,6 +2172,16 @@ void __init kmemleak_init(void)
kmemleak_warning = 0;
}
}
+
+ /* populate the emergency allocation pool */
+ while (emergency_pool_size < EMERGENCY_POOL_SIZE) {
+ struct kmemleak_object *object;
+
+ object = kmem_cache_alloc(object_cache, GFP_KERNEL);
+ if (!object)
+ break;
+ emergency_free(object);
+ }
}
/*