[RESEND v4 6/7] mm, slab_common: Initialize the same size of kmalloc_caches[]
From: Pengfei Li
Date: Sun Sep 15 2019 - 13:09:14 EST
In the current implementation, the KMALLOC_RECLAIM caches are not
initialized until all of the KMALLOC_NORMAL sizes have been
initialized.
However, create_kmalloc_caches() can be made smaller and faster by
initializing every kmalloc type for a particular size together, in a
single pass over the sizes.
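A condensed view of the restructuring (taken from the diff below; the
CONFIG_ZONE_DMA part is left out for brevity):

	/* before: one full pass over all sizes per kmalloc type */
	for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
		for (i = 0; i < KMALLOC_CACHE_NUM; i++) {
			if (!kmalloc_caches[type][i])
				new_kmalloc_cache(i, type, flags);
		}
	}

	/* after: all types for one size are created in the same pass */
	for (i = 0; i < KMALLOC_CACHE_NUM; i++) {
		if (!kmalloc_caches[KMALLOC_NORMAL][i])
			new_kmalloc_cache(i, KMALLOC_NORMAL, flags);

		new_kmalloc_cache(i, KMALLOC_RECLAIM,
				  flags | SLAB_RECLAIM_ACCOUNT);
	}

The bloat-o-meter comparison below (the baseline kernel against
patches 1-5, and then against patches 1-6 of this series) shows the
effect on code size: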
$ ./scripts/bloat-o-meter vmlinux.old vmlinux.patch_1-5
add/remove: 1/2 grow/shrink: 6/64 up/down: 872/-1113 (-241)
Function old new delta
create_kmalloc_caches 270 214 -56
$ ./scripts/bloat-o-meter vmlinux.old vmlinux.patch_1-6
add/remove: 1/2 grow/shrink: 6/64 up/down: 872/-1172 (-300)
Function old new delta
create_kmalloc_caches 270 155 -115
Comparing the two runs, this patch shrinks create_kmalloc_caches() by
a further 59 bytes (214 -> 155), so the reordering really does pay off.
Besides, KMALLOC_DMA is currently initialized only after
"slab_state = UP", which does not seem to be necessary.
Commit f97d5f634d3b ("slab: Common function to create the kmalloc
array") introduced create_kmalloc_caches(). I found that for SLAB,
KMALLOC_DMA is initialized before "slab_state = UP", whereas for SLUB
it is initialized afterwards.
Based on this, I think it is fine to initialize KMALLOC_DMA before
"slab_state = UP".
Signed-off-by: Pengfei Li <lpf.vector@xxxxxxxxx>
---
mm/slab_common.c | 35 ++++++++++++-----------------------
1 file changed, 12 insertions(+), 23 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 2aed30deb071..e7903bd28b1f 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1165,12 +1165,9 @@ void __init setup_kmalloc_cache_index_table(void)
size_index[size_index_elem(i)] = 0;
}
-static void __init
+static __always_inline void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
{
- if (type == KMALLOC_RECLAIM)
- flags |= SLAB_RECLAIM_ACCOUNT;
-
kmalloc_caches[type][idx] = create_kmalloc_cache(
kmalloc_info[idx].name[type],
kmalloc_info[idx].size, flags, 0,
@@ -1185,30 +1182,22 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
void __init create_kmalloc_caches(slab_flags_t flags)
{
int i;
- enum kmalloc_cache_type type;
- for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
- for (i = 0; i < KMALLOC_CACHE_NUM; i++) {
- if (!kmalloc_caches[type][i])
- new_kmalloc_cache(i, type, flags);
- }
- }
+ for (i = 0; i < KMALLOC_CACHE_NUM; i++) {
+ if (!kmalloc_caches[KMALLOC_NORMAL][i])
+ new_kmalloc_cache(i, KMALLOC_NORMAL, flags);
- /* Kmalloc array is now usable */
- slab_state = UP;
+ new_kmalloc_cache(i, KMALLOC_RECLAIM,
+ flags | SLAB_RECLAIM_ACCOUNT);
#ifdef CONFIG_ZONE_DMA
- for (i = 0; i < KMALLOC_CACHE_NUM; i++) {
- struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];
-
- if (s) {
- kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
- kmalloc_info[i].name[KMALLOC_DMA],
- kmalloc_info[i].size,
- SLAB_CACHE_DMA | flags, 0, 0);
- }
- }
+ new_kmalloc_cache(i, KMALLOC_DMA,
+ flags | SLAB_CACHE_DMA);
#endif
+ }
+
+ /* Kmalloc array is now usable */
+ slab_state = UP;
}
#endif /* !CONFIG_SLOB */
--
2.21.0