[PATCH v5 08/21] mm/hugetlb: Initialize page table lock for vmemmap

From: Muchun Song
Date: Fri Nov 20 2020 - 01:47:55 EST


A later patch will use the vmemmap page table lock to guard the
splitting of the vmemmap PMD, so initialize that page table lock
here.

Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
mm/hugetlb_vmemmap.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 69 insertions(+)

diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index ec70980000d8..bc8546df4a51 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -99,6 +99,8 @@
*/
#define pr_fmt(fmt) "HugeTLB Vmemmap: " fmt

+#include <linux/pagewalk.h>
+#include <linux/mmzone.h>
#include <linux/list.h>
#include <asm/pgalloc.h>
#include "hugetlb_vmemmap.h"
@@ -208,3 +210,70 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
pr_debug("can free %d vmemmap pages for %s\n", h->nr_free_vmemmap_pages,
h->name);
}
+
+static int __init vmemmap_pud_entry(pud_t *pud, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ struct page *page = pud_page(*pud);
+
+ /*
+ * page->private shares storage with page->ptl, so ensure PG_private
+ * is clear and page->private is zeroed before pmd_ptlock_init()
+ * repurposes that field as the PMD page table lock.
+ */
+ VM_BUG_ON_PAGE(PagePrivate(page), page);
+ set_page_private(page, 0);
+
+ BUG_ON(!pmd_ptlock_init(page));
+
+ return 0;
+}
+
+static void __init vmemmap_ptlock_init_section(unsigned long start_pfn)
+{
+ unsigned long section_nr;
+ struct mem_section *ms;
+ struct page *memmap, *memmap_end;
+ struct mm_struct *mm = &init_mm;
+
+ const struct mm_walk_ops ops = {
+ .pud_entry = vmemmap_pud_entry,
+ };
+
+ section_nr = pfn_to_section_nr(start_pfn);
+ ms = __nr_to_section(section_nr);
+ memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
+ memmap_end = memmap + PAGES_PER_SECTION;
+
+ mmap_read_lock(mm);
+ BUG_ON(walk_page_range_novma(mm, (unsigned long)memmap,
+ (unsigned long)memmap_end,
+ &ops, NULL, NULL));
+ mmap_read_unlock(mm);
+}
+
+static void __init vmemmap_ptlock_init_node(int nid)
+{
+ unsigned long pfn, end_pfn;
+ struct pglist_data *pgdat = NODE_DATA(nid);
+
+ pfn = pgdat->node_start_pfn;
+ end_pfn = pgdat_end_pfn(pgdat);
+
+ for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
+ vmemmap_ptlock_init_section(pfn);
+}
+
+static int __init vmemmap_ptlock_init(void)
+{
+ int nid;
+
+ if (!hugepages_supported())
+ return 0;
+
+ for_each_online_node(nid)
+ vmemmap_ptlock_init_node(nid);
+
+ return 0;
+}
+core_initcall(vmemmap_ptlock_init);
--
2.11.0