[PATCH 03/11] mm/slab: drain the free slab as much as possible
From: js1304
Date: Mon Mar 28 2016 - 01:27:41 EST
From: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
slabs_tofree() returns the number of slabs needed to free all of the
node's free objects, so passing it to drain_freelist() just means
"free every free slab". We can achieve the same by simply passing INT_MAX.
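
For reference, a minimal userspace sketch (a toy model made up for
illustration, not the actual mm/slab.c code) of why INT_MAX is
sufficient: drain_freelist() stops as soon as no free slabs remain, so
any tofree value at least as large as the slabs_tofree() count drains
everything.

#include <limits.h>
#include <stdio.h>

/* Toy model of a node holding completely free slabs of 'num' objects each. */
struct toy_node {
	int free_slabs;
	int free_objects;
};

/* Mimics drain_freelist(): free free slabs until 'tofree' or none are left. */
static int toy_drain(struct toy_node *n, int num, int tofree)
{
	int freed = 0;

	while (freed < tofree && n->free_slabs > 0) {
		n->free_slabs--;
		n->free_objects -= num;
		freed++;
	}
	return freed;
}

int main(void)
{
	int num = 32;
	struct toy_node a = { .free_slabs = 7, .free_objects = 7 * num };
	struct toy_node b = a;
	/* What slabs_tofree() used to compute: ceil(free_objects / num). */
	int tofree = (a.free_objects + num - 1) / num;

	printf("slabs_tofree(): freed %d slabs\n", toy_drain(&a, num, tofree));
	printf("INT_MAX:        freed %d slabs\n", toy_drain(&b, num, INT_MAX));
	return 0;
}

Both calls free all 7 free slabs, which is why the helper can go away.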
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
---
mm/slab.c | 12 +++---------
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index a5a205b..ba2eacf 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -888,12 +888,6 @@ static int init_cache_node_node(int node)
 	return 0;
 }
 
-static inline int slabs_tofree(struct kmem_cache *cachep,
-				struct kmem_cache_node *n)
-{
-	return (n->free_objects + cachep->num - 1) / cachep->num;
-}
-
 static void cpuup_canceled(long cpu)
 {
 	struct kmem_cache *cachep;
@@ -958,7 +952,7 @@ free_slab:
 		n = get_node(cachep, node);
 		if (!n)
 			continue;
-		drain_freelist(cachep, n, slabs_tofree(cachep, n));
+		drain_freelist(cachep, n, INT_MAX);
 	}
 }
 
@@ -1110,7 +1104,7 @@ static int __meminit drain_cache_node_node(int node)
 		if (!n)
 			continue;
 
-		drain_freelist(cachep, n, slabs_tofree(cachep, n));
+		drain_freelist(cachep, n, INT_MAX);
 
 		if (!list_empty(&n->slabs_full) ||
 		    !list_empty(&n->slabs_partial)) {
@@ -2280,7 +2274,7 @@ int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
 
 	check_irq_on();
 	for_each_kmem_cache_node(cachep, node, n) {
-		drain_freelist(cachep, n, slabs_tofree(cachep, n));
+		drain_freelist(cachep, n, INT_MAX);
 
 		ret += !list_empty(&n->slabs_full) ||
 			!list_empty(&n->slabs_partial);
--
1.9.1