[RFC PATCH v2 4/5] hugetlb: parallelize 2M hugetlb allocation and initialization

From: Gang Li
Date: Thu Dec 07 2023 - 21:53:28 EST


By distributing both the allocation and the initialization work across
multiple threads, the initialization of 2M hugetlb pages becomes faster,
which improves boot speed.
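
For reference, the padata multithreaded job interface used here has
roughly the following shape (abridged from include/linux/padata.h; the
numa_aware field is introduced earlier in this series):

	struct padata_mt_job {
		void (*thread_fn)(unsigned long start, unsigned long end,
				  void *arg);
		void		*fn_arg;	/* argument passed to thread_fn */
		unsigned long	start;		/* first unit of work */
		unsigned long	size;		/* total units of work */
		unsigned long	align;		/* chunk boundary alignment */
		unsigned long	min_chunk;	/* minimum units per chunk */
		int		max_threads;	/* upper bound on worker threads */
		bool		numa_aware;	/* spread chunks across NUMA nodes */
	};

	void __init padata_do_multithreaded(struct padata_mt_job *job);

With N online memory nodes, the allocation job below uses up to 2*N
threads with chunks of at least max_huge_pages/(2*N) pages each, and the
vmemmap optimization job uses one chunk (and thus one thread) per node.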

This patch reduces the initialization time of 2M hugetlb pages by more
than 60%:

test                 no patch(ms)   patched(ms)   saved
-------------------  -------------  ------------  --------
256c2t(4 node) 2M    2624           956           63.57%
128c1t(2 node) 2M    1788           684           61.74%

Signed-off-by: Gang Li <gang.li@xxxxxxxxx>
---
mm/hugetlb.c | 71 ++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 52 insertions(+), 19 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8de1653fc4c4f..033e359fdb86b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -35,6 +35,7 @@
#include <linux/delayacct.h>
#include <linux/memory.h>
#include <linux/mm_inline.h>
+#include <linux/padata.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
@@ -3502,6 +3503,37 @@ static void __init hugetlb_hstate_alloc_pages_report(unsigned long allocated, st
}
}

+static void __init hugetlb_alloc_node(unsigned long start, unsigned long end, void *arg)
+{
+	struct hstate *h = (struct hstate *)arg;
+	int i, num = end - start;
+	nodemask_t node_alloc_noretry;
+	unsigned long flags;
+
+	/* Bit mask controlling how hard we retry per-node allocations.*/
+	nodes_clear(node_alloc_noretry);
+
+	for (i = 0; i < num; ++i) {
+		struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
+							    &node_alloc_noretry);
+		if (!folio)
+			break;
+		spin_lock_irqsave(&hugetlb_lock, flags);
+		__prep_account_new_huge_page(h, folio_nid(folio));
+		enqueue_hugetlb_folio(h, folio);
+		spin_unlock_irqrestore(&hugetlb_lock, flags);
+		cond_resched();
+	}
+}
+
+static void __init hugetlb_vmemmap_optimize_node(unsigned long start, unsigned long end, void *arg)
+{
+	struct hstate *h = (struct hstate *)arg;
+	int nid = start;
+
+	hugetlb_vmemmap_optimize_folios(h, &h->hugepage_freelists[nid]);
+}
+
static unsigned long __init hugetlb_hstate_alloc_pages_gigantic(struct hstate *h)
{
unsigned long i;
@@ -3521,26 +3553,27 @@ static unsigned long __init hugetlb_hstate_alloc_pages_gigantic(struct hstate *h

static unsigned long __init hugetlb_hstate_alloc_pages_non_gigantic(struct hstate *h)
{
-	unsigned long i;
-	struct folio *folio;
-	LIST_HEAD(folio_list);
-	nodemask_t node_alloc_noretry;
-
-	/* Bit mask controlling how hard we retry per-node allocations.*/
-	nodes_clear(node_alloc_noretry);
-
-	for (i = 0; i < h->max_huge_pages; ++i) {
-		folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
-					      &node_alloc_noretry);
-		if (!folio)
-			break;
-		list_add(&folio->lru, &folio_list);
-		cond_resched();
-	}
-
-	prep_and_add_allocated_folios(h, &folio_list);
+	struct padata_mt_job job = {
+		.fn_arg		= h,
+		.align		= 1,
+		.numa_aware	= true,
+	};

-	return i;
+	job.thread_fn	= hugetlb_alloc_node;
+	job.start	= 0;
+	job.size	= h->max_huge_pages;
+	job.min_chunk	= h->max_huge_pages / num_node_state(N_MEMORY) / 2;
+	job.max_threads	= num_node_state(N_MEMORY) * 2;
+	padata_do_multithreaded(&job);
+
+	job.thread_fn	= hugetlb_vmemmap_optimize_node;
+	job.start	= 0;
+	job.size	= num_node_state(N_MEMORY);
+	job.min_chunk	= 1;
+	job.max_threads	= num_node_state(N_MEMORY);
+	padata_do_multithreaded(&job);
+
+	return h->nr_huge_pages;
}

/*
--
2.30.2