[PATCH v2 03/17] mm/slab: remove the checks for slab implementation bug

From: js1304
Date: Fri Feb 26 2016 - 01:01:47 EST


From: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>

Some of "#if DEBUG" are for reporting slab implementation bug rather than
user usecase bug. It's not really needed because slab is stable for a
quite long time and it makes code too dirty. This patch remove it.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Jesper Dangaard Brouer <brouer@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
mm/slab.c | 29 +++++++----------------------
1 file changed, 7 insertions(+), 22 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index 3634dc1..14c3f9c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2110,8 +2110,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
if (!(flags & SLAB_DESTROY_BY_RCU))
flags |= SLAB_POISON;
#endif
- if (flags & SLAB_DESTROY_BY_RCU)
- BUG_ON(flags & SLAB_POISON);
#endif

/*
@@ -2368,9 +2366,6 @@ static int drain_freelist(struct kmem_cache *cache,
}

page = list_entry(p, struct page, lru);
-#if DEBUG
- BUG_ON(page->active);
-#endif
list_del(&page->lru);
/*
* Safe to drop the lock. The slab is no longer linked
@@ -2528,30 +2523,23 @@ static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
}
}

-static void *slab_get_obj(struct kmem_cache *cachep, struct page *page,
- int nodeid)
+static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
{
void *objp;

objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
page->active++;
-#if DEBUG
- WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
-#endif

return objp;
}

-static void slab_put_obj(struct kmem_cache *cachep, struct page *page,
- void *objp, int nodeid)
+static void slab_put_obj(struct kmem_cache *cachep,
+ struct page *page, void *objp)
{
unsigned int objnr = obj_to_index(cachep, page, objp);
#if DEBUG
unsigned int i;

- /* Verify that the slab belongs to the intended node */
- WARN_ON(page_to_nid(virt_to_page(objp)) != nodeid);
-
/* Verify double free bug */
for (i = page->active; i < cachep->num; i++) {
if (get_free_obj(page, i) == objnr) {
@@ -2817,8 +2805,7 @@ retry:
STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep);

- ac_put_obj(cachep, ac, slab_get_obj(cachep, page,
- node));
+ ac_put_obj(cachep, ac, slab_get_obj(cachep, page));
}

/* move slabp to correct slabp list: */
@@ -3101,7 +3088,7 @@ retry:

BUG_ON(page->active == cachep->num);

- obj = slab_get_obj(cachep, page, nodeid);
+ obj = slab_get_obj(cachep, page);
n->free_objects--;
/* move slabp to correct slabp list: */
list_del(&page->lru);
@@ -3252,7 +3239,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
page = virt_to_head_page(objp);
list_del(&page->lru);
check_spinlock_acquired_node(cachep, node);
- slab_put_obj(cachep, page, objp, node);
+ slab_put_obj(cachep, page, objp);
STATS_DEC_ACTIVE(cachep);
n->free_objects++;

@@ -3282,9 +3269,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
LIST_HEAD(list);

batchcount = ac->batchcount;
-#if DEBUG
- BUG_ON(!batchcount || batchcount > ac->avail);
-#endif
+
check_irq_off();
n = get_node(cachep, node);
spin_lock(&n->list_lock);
--
1.9.1