[PATCH 3/5] mm: use a dedicated workqueue for the free workers

From: Aaron Lu
Date: Fri Feb 24 2017 - 06:41:03 EST


Introduce a dedicated workqueue for all the free workers so that users can
fine-tune how many workers can be active through the workqueue's sysfs
interface: max_active. More workers will normally lead to better
performance, but too many of them can cause severe zone lock contention.

Note that since the zone lock is global, the workqueue is also global
for all processes, i.e. if max_active is set to 8, there will be at most
8 workers active across all processes that are doing munmap()/exit()/etc.
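
As a usage sketch (the sysfs path below assumes the standard layout for
WQ_SYSFS workqueues and is not taken from this patch), the limit could be
tuned at run time with something like:

  # allow at most 8 concurrently active free workers, system wide
  echo 8 > /sys/devices/virtual/workqueue/batch_free_wq/max_active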

Signed-off-by: Aaron Lu <aaron.lu@xxxxxxxxx>
---
mm/memory.c | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/mm/memory.c b/mm/memory.c
index b98cd25075f0..eb8b17fc1b2b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -254,6 +254,18 @@ static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
__tlb_reset_range(tlb);
}

+static struct workqueue_struct *batch_free_wq;
+static int __init batch_free_wq_init(void)
+{
+	batch_free_wq = alloc_workqueue("batch_free_wq", WQ_UNBOUND | WQ_SYSFS, 0);
+	if (!batch_free_wq) {
+		pr_warn("failed to create workqueue batch_free_wq\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+subsys_initcall(batch_free_wq_init);
+
static void tlb_flush_mmu_free_batches(struct mmu_gather_batch *batch_start,
int free_batch_page)
{
@@ -305,7 +317,7 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
batch_free->batch_start = tlb->local.next;
INIT_WORK(&batch_free->work, batch_free_work);
list_add(&batch_free->list, &tlb->worker_list);
- queue_work(system_unbound_wq, &batch_free->work);
+ queue_work(batch_free_wq, &batch_free->work);

tlb->batch_count = 0;
tlb->local.next = NULL;
--
2.9.3