[tip: sched/core] sched/wait_bit, mm/filemap: Increase page and bit waitqueue hash size
From: tip-bot2 for Nicholas Piggin
Date: Wed Mar 17 2021 - 08:39:38 EST
The following commit has been merged into the sched/core branch of tip:
Commit-ID: 873d7c4c6a920d43ff82e44121e54053d4edba93
Gitweb: https://git.kernel.org/tip/873d7c4c6a920d43ff82e44121e54053d4edba93
Author: Nicholas Piggin <npiggin@xxxxxxxxx>
AuthorDate: Wed, 17 Mar 2021 17:54:27 +10:00
Committer: Ingo Molnar <mingo@xxxxxxxxxx>
CommitterDate: Wed, 17 Mar 2021 09:32:30 +01:00
sched/wait_bit, mm/filemap: Increase page and bit waitqueue hash size
The page waitqueue hash is a bit small (256 entries) on very big systems. A
16-socket, 1536-thread POWER9 system was found to encounter hash collisions
and excessive time in waitqueue locking at times. This was intermittent and
hard to reproduce easily with the setup we had (very little real IO
capacity). The theory is that sometimes (depending on allocation luck)
important pages would happen to collide a lot in the hash, slowing down page
locking and causing the problem to snowball.
A small test case was made in which threads write and fsync different
pages, generating just a small amount of contention across many pages.
Increasing the page waitqueue hash size to 262144 entries increased throughput
by 182% while also reducing the standard deviation by 3x. perf before the increase:
   36.23%  [k] _raw_spin_lock_irqsave                -      -
            |
            |--34.60%--wake_up_page_bit
            |          0
            |          iomap_write_end.isra.38
            |          iomap_write_actor
            |          iomap_apply
            |          iomap_file_buffered_write
            |          xfs_file_buffered_aio_write
            |          new_sync_write

   17.93%  [k] native_queued_spin_lock_slowpath      -      -
            |
            |--16.74%--_raw_spin_lock_irqsave
            |          |
            |           --16.44%--wake_up_page_bit
            |                     iomap_write_end.isra.38
            |                     iomap_write_actor
            |                     iomap_apply
            |                     iomap_file_buffered_write
            |                     xfs_file_buffered_aio_write
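
The test program itself is not part of the changelog. Purely for illustration,
a user-space microbenchmark of the kind described above (each thread repeatedly
writing and fsyncing its own page of a shared file) might look roughly like the
following sketch; the thread count, iteration count and file name are made-up
values, not the ones used in the report:

/*
 * Hypothetical sketch of the microbenchmark described above: each
 * thread repeatedly writes and fsyncs its own page of a shared file,
 * spreading a small amount of contention across many pages.
 *
 * Build: gcc -O2 -pthread pagewait-bench.c -o pagewait-bench
 */
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define NTHREADS	64	/* arbitrary */
#define ITERATIONS	10000	/* arbitrary */
#define PAGE_SZ		4096

static int fd;

static void *worker(void *arg)
{
	long id = (long)arg;
	off_t off = (off_t)id * PAGE_SZ;	/* each thread owns one page */
	char buf[PAGE_SZ];
	int i;

	memset(buf, 'a' + (id % 26), sizeof(buf));
	for (i = 0; i < ITERATIONS; i++) {
		if (pwrite(fd, buf, sizeof(buf), off) != (ssize_t)sizeof(buf)) {
			perror("pwrite");
			exit(1);
		}
		fsync(fd);	/* writeback exercises the page lock/waitqueue path */
	}
	return NULL;
}

int main(void)
{
	pthread_t tids[NTHREADS];
	long i;

	fd = open("bench.dat", O_CREAT | O_RDWR, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	for (i = 0; i < NTHREADS; i++)
		pthread_create(&tids[i], NULL, worker, (void *)i);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(tids[i], NULL);
	close(fd);
	return 0;
}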
This patch uses alloc_large_system_hash to allocate a bigger system hash
that scales somewhat with memory size. The bit/var waitqueue is also
changed to keep the code matching, albeit with a smaller scale factor.
A very small table is kept for CONFIG_BASE_SMALL builds, because these are
two of the biggest static objects in the image on very small systems.
This hash could be made per-node, which may help reduce remote accesses
on well-localised workloads, but that adds some complexity with indexing
and hotplug, so until we have a less artificial workload to test with,
keep it simple.
Signed-off-by: Nicholas Piggin <npiggin@xxxxxxxxx>
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Acked-by: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
Link: https://lore.kernel.org/r/20210317075427.587806-1-npiggin@xxxxxxxxx
---
kernel/sched/wait_bit.c | 30 +++++++++++++++++++++++-------
mm/filemap.c | 24 +++++++++++++++++++++---
2 files changed, 44 insertions(+), 10 deletions(-)
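
For readers not familiar with alloc_large_system_hash(), here is an annotated
copy of the page waitqueue call made in the patch below; the comments describe
the parameters as defined for alloc_large_system_hash() in mm/page_alloc.c
(the bit waitqueue call is the same apart from the name and a scale of 22):

	/*
	 * Annotated copy of the pagecache_init() call from the patch below.
	 * With numentries == 0 the table is sized from the amount of memory,
	 * roughly one entry per 2^scale bytes, and log2 of the resulting
	 * entry count is written back through _hash_shift.
	 */
	page_wait_table = alloc_large_system_hash(
			"page waitqueue hash",		/* tablename, printed at boot */
			sizeof(wait_queue_head_t),	/* bucketsize in bytes */
			0,				/* numentries: 0 = derive from memory size */
			21,				/* scale: ~1 entry per 2 MiB of memory */
			0,				/* flags: no HASH_EARLY/HASH_ZERO etc. */
			&page_wait_table_bits,		/* _hash_shift: receives log2(entries) */
			NULL,				/* _hash_mask: not needed here */
			0,				/* low_limit on entry count: none */
			0);				/* high_limit on entry count: none */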
diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c
index 02ce292..dba73de 100644
--- a/kernel/sched/wait_bit.c
+++ b/kernel/sched/wait_bit.c
@@ -2,19 +2,24 @@
/*
* The implementation of the wait_bit*() and related waiting APIs:
*/
+#include <linux/memblock.h>
#include "sched.h"
-#define WAIT_TABLE_BITS 8
-#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)
-
-static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned;
+#define BIT_WAIT_TABLE_SIZE (1 << bit_wait_table_bits)
+#if CONFIG_BASE_SMALL
+static const unsigned int bit_wait_table_bits = 3;
+static wait_queue_head_t bit_wait_table[BIT_WAIT_TABLE_SIZE] __cacheline_aligned;
+#else
+static unsigned int bit_wait_table_bits __ro_after_init;
+static wait_queue_head_t *bit_wait_table __ro_after_init;
+#endif
wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
const int shift = BITS_PER_LONG == 32 ? 5 : 6;
unsigned long val = (unsigned long)word << shift | bit;
- return bit_wait_table + hash_long(val, WAIT_TABLE_BITS);
+ return bit_wait_table + hash_long(val, bit_wait_table_bits);
}
EXPORT_SYMBOL(bit_waitqueue);
@@ -152,7 +157,7 @@ EXPORT_SYMBOL(wake_up_bit);
wait_queue_head_t *__var_waitqueue(void *p)
{
- return bit_wait_table + hash_ptr(p, WAIT_TABLE_BITS);
+ return bit_wait_table + hash_ptr(p, bit_wait_table_bits);
}
EXPORT_SYMBOL(__var_waitqueue);
@@ -246,6 +251,17 @@ void __init wait_bit_init(void)
{
int i;
- for (i = 0; i < WAIT_TABLE_SIZE; i++)
+ if (!CONFIG_BASE_SMALL) {
+ bit_wait_table = alloc_large_system_hash("bit waitqueue hash",
+ sizeof(wait_queue_head_t),
+ 0,
+ 22,
+ 0,
+ &bit_wait_table_bits,
+ NULL,
+ 0,
+ 0);
+ }
+ for (i = 0; i < BIT_WAIT_TABLE_SIZE; i++)
init_waitqueue_head(bit_wait_table + i);
}
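
For context, the table above is only ever reached through the
wait_on_bit()/wake_up_bit() family. A typical, purely illustrative pairing
looks roughly like the following, where my_flags and MY_FLAG_BUSY are
hypothetical names:

/* Illustration only: how callers end up hashing into bit_wait_table. */
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

static unsigned long my_flags;		/* hypothetical flags word */
#define MY_FLAG_BUSY	0		/* hypothetical bit number */

/* Waiter: sleeps on bit_waitqueue(&my_flags, MY_FLAG_BUSY) until the bit clears. */
static int wait_until_idle(void)
{
	return wait_on_bit(&my_flags, MY_FLAG_BUSY, TASK_UNINTERRUPTIBLE);
}

/* Waker: clears the bit, then wakes anyone hashed onto the same table entry. */
static void mark_idle(void)
{
	clear_bit_unlock(MY_FLAG_BUSY, &my_flags);
	smp_mb__after_atomic();		/* barrier required before wake_up_bit() */
	wake_up_bit(&my_flags, MY_FLAG_BUSY);
}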
diff --git a/mm/filemap.c b/mm/filemap.c
index 4370048..dbbb5b9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -34,6 +34,7 @@
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hugetlb.h>
+#include <linux/memblock.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/shmem_fs.h>
@@ -990,19 +991,36 @@ EXPORT_SYMBOL(__page_cache_alloc);
* at a cost of "thundering herd" phenomena during rare hash
* collisions.
*/
-#define PAGE_WAIT_TABLE_BITS 8
-#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
+#define PAGE_WAIT_TABLE_SIZE (1 << page_wait_table_bits)
+#if CONFIG_BASE_SMALL
+static const unsigned int page_wait_table_bits = 4;
static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
+#else
+static unsigned int page_wait_table_bits __ro_after_init;
+static wait_queue_head_t *page_wait_table __ro_after_init;
+#endif
static wait_queue_head_t *page_waitqueue(struct page *page)
{
- return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
+ return &page_wait_table[hash_ptr(page, page_wait_table_bits)];
}
void __init pagecache_init(void)
{
int i;
+ if (!CONFIG_BASE_SMALL) {
+ page_wait_table = alloc_large_system_hash("page waitqueue hash",
+ sizeof(wait_queue_head_t),
+ 0,
+ 21,
+ 0,
+ &page_wait_table_bits,
+ NULL,
+ 0,
+ 0);
+ }
+
for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
init_waitqueue_head(&page_wait_table[i]);
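
Finally, to connect this back to the profile above: every wakeup on a page
funnels through the hashed waitqueue returned by page_waitqueue(), so two hot
pages that collide in the hash serialize on the same q->lock. A much
simplified sketch of that path (not the exact mm/filemap.c code):

/*
 * Much simplified sketch of the waker path seen in the profile above
 * (wake_up_page_bit); not the exact mm/filemap.c implementation.
 */
static void wake_up_page_bit_sketch(struct page *page, int bit_nr)
{
	wait_queue_head_t *q = page_waitqueue(page);	/* hashed lookup */
	unsigned long flags;

	/* Colliding pages share this lock: the contention in the profile. */
	spin_lock_irqsave(&q->lock, flags);
	/* ... wake tasks on q that are waiting for (page, bit_nr) ... */
	spin_unlock_irqrestore(&q->lock, flags);
}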