From 1ac0fa34783f66ea7334fd2e9129bc362bee51dd Mon Sep 17 00:00:00 2001
From: Yosry Ahmed
Date: Sat, 13 May 2023 01:10:34 +0000
Subject: [PATCH] mm: zsmalloc: share slab caches for all zsmalloc zpools

Zswap creates 32 zpools to improve concurrency. Each zsmalloc zpool
creates its own 'zs_handle' and 'zspage' slab caches, so we end up with
32 slab caches of each type.

Since each slab cache holds some free objects, we end up with a lot of
free objects distributed among the separate zpool caches. Slab caches
are designed to handle concurrent allocations by using percpu
structures, so having a single instance of each cache should be enough,
and avoids wasting more memory than needed due to fragmentation.

Additionally, having more slab caches than needed unnecessarily slows
down code paths that iterate slab_caches.

Signed-off-by: Yosry Ahmed
---
 mm/zsmalloc.c | 60 +++++++++++++++++++++++++++++++++++---------------------------
 1 file changed, 33 insertions(+), 27 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b42d3545ca856..c3c90618ec8b4 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -289,29 +289,8 @@ static void init_deferred_free(struct zs_pool *pool) {}
 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
 #endif
 
-static int create_cache(struct zs_pool *pool)
-{
-	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
-					0, 0, NULL);
-	if (!pool->handle_cachep)
-		return 1;
-
-	pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
-					0, 0, NULL);
-	if (!pool->zspage_cachep) {
-		kmem_cache_destroy(pool->handle_cachep);
-		pool->handle_cachep = NULL;
-		return 1;
-	}
-
-	return 0;
-}
-
-static void destroy_cache(struct zs_pool *pool)
-{
-	kmem_cache_destroy(pool->handle_cachep);
-	kmem_cache_destroy(pool->zspage_cachep);
-}
+static struct kmem_cache *zs_handle_cache;
+static struct kmem_cache *zspage_cache;
 
 static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
 {
@@ -2107,13 +2086,13 @@ struct zs_pool *zs_create_pool(const char *name)
 	spin_lock_init(&pool->lock);
 	atomic_set(&pool->compaction_in_progress, 0);
 
+	pool->handle_cachep = zs_handle_cache;
+	pool->zspage_cachep = zspage_cache;
+
 	pool->name = kstrdup(name, GFP_KERNEL);
 	if (!pool->name)
 		goto err;
 
-	if (create_cache(pool))
-		goto err;
-
 	/*
 	 * Iterate reversely, because, size of size_class that we want to use
 	 * for merging should be larger or equal to current size.
@@ -2234,16 +2213,41 @@ void zs_destroy_pool(struct zs_pool *pool)
 		kfree(class);
 	}
 
-	destroy_cache(pool);
 	kfree(pool->name);
 	kfree(pool);
 }
 EXPORT_SYMBOL_GPL(zs_destroy_pool);
 
+static void zs_destroy_caches(void)
+{
+	kmem_cache_destroy(zs_handle_cache);
+	kmem_cache_destroy(zspage_cache);
+	zs_handle_cache = NULL;
+	zspage_cache = NULL;
+}
+
+static int zs_create_caches(void)
+{
+	zs_handle_cache = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
+					0, 0, NULL);
+	zspage_cache = kmem_cache_create("zspage", sizeof(struct zspage),
+					0, 0, NULL);
+
+	if (!zs_handle_cache || !zspage_cache) {
+		zs_destroy_caches();
+		return -ENOMEM;
+	}
+	return 0;
+}
+
 static int __init zs_init(void)
 {
 	int ret;
 
+	ret = zs_create_caches();
+	if (ret)
+		goto out;
+
 	ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
 				zs_cpu_prepare, zs_cpu_dead);
 	if (ret)
@@ -2258,6 +2262,7 @@ static int __init zs_init(void)
 	return 0;
 
 out:
+	zs_destroy_caches();
 	return ret;
 }
 
@@ -2269,6 +2274,7 @@ static void __exit zs_exit(void)
 	cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);
 
 	zs_stat_exit();
+	zs_destroy_caches();
 }
 
 module_init(zs_init);
-- 
2.45.1.288.g0e0cd299f1-goog
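For readers outside mm/, below is a minimal userspace C sketch of the
ownership change the diff makes: the per-pool caches become a single
module-lifetime pair that every pool shares. The 'struct cache' type and
the cache_create()/cache_destroy() helpers are hypothetical stand-ins
for struct kmem_cache and its API, not kernel code:

/*
 * Illustration only: a userspace analogue of the patch's refactor.
 * 'struct cache' stands in for struct kmem_cache.
 */
#include <stdio.h>
#include <stdlib.h>

struct cache { const char *name; size_t obj_size; };

static struct cache *cache_create(const char *name, size_t obj_size)
{
	struct cache *c = malloc(sizeof(*c));

	if (c) {
		c->name = name;
		c->obj_size = obj_size;
	}
	return c;
}

static void cache_destroy(struct cache *c)
{
	free(c);	/* free(NULL) is a no-op, like kmem_cache_destroy(NULL) */
}

/* Before the patch each pool created these; now one shared pair. */
static struct cache *zs_handle_cache;
static struct cache *zspage_cache;

struct pool {
	/* Pools keep the pointers, so per-pool code paths are unchanged. */
	struct cache *handle_cachep;
	struct cache *zspage_cachep;
};

static int caches_init(void)
{
	zs_handle_cache = cache_create("zs_handle", 8);
	zspage_cache = cache_create("zspage", 64);
	if (!zs_handle_cache || !zspage_cache) {
		cache_destroy(zs_handle_cache);
		cache_destroy(zspage_cache);
		return -1;
	}
	return 0;
}

static struct pool *pool_create(void)
{
	struct pool *p = malloc(sizeof(*p));

	if (p) {
		p->handle_cachep = zs_handle_cache;
		p->zspage_cachep = zspage_cache;
	}
	return p;
}

int main(void)
{
	if (caches_init())
		return 1;

	/* Any number of pools now alias the same two caches. */
	struct pool *a = pool_create();
	struct pool *b = pool_create();

	printf("pools share zs_handle cache: %s\n",
	       (a && b && a->handle_cachep == b->handle_cachep) ? "yes" : "no");

	free(a);
	free(b);
	cache_destroy(zs_handle_cache);
	cache_destroy(zspage_cache);
	return 0;
}

As in the patch, the pair is created once at init and torn down once at
exit; pool creation and destruction no longer own cache lifetime.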