[PATCH v2 3/5] mm/hugetlb: fix races when looking up a CONT-PMD size hugetlb page
From: Baolin Wang
Date: Tue Aug 23 2022 - 03:51:26 EST
Some architectures (like ARM64) support CONT-PTE/PMD size hugetlb pages,
which means they can support not only PMD/PUD size hugetlb (2M and 1G),
but also CONT-PTE/PMD sizes (64K and 32M) when a 4K base page size is
used.
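The CONT sizes fall out of how many contiguous entries the architecture
can cover with a single TLB entry; as a rough sketch of the arithmetic
(assuming arm64's usual value of 16 contiguous entries at each level
with 4K pages, which is an arm64 detail rather than part of this patch):

	/*
	 * CONT-PTE: 16 contiguous PTEs * 4K = 64K
	 * CONT-PMD: 16 contiguous PMDs * 2M = 32M
	 */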
When looking up a CONT-PMD size hugetlb page via follow_page(),
follow_huge_pmd() always takes the split PMD page lock to protect the
pmd entry. However, that is not the correct lock for a CONT-PMD size
hugetlb page, so the pmd entry is unstable while that lock is held: the
entry can still be migrated or poisoned concurrently, and the lookup
may fail to return the correct CONT-PMD size page.
Fix this potential race by using huge_pte_lock() to take the lock that
matches the hugetlb size at hand, so the pmd entry of a CONT-PMD size
hugetlb page stays stable during the lookup.
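For reference, huge_pte_lock() dispatches on the hstate's huge page
size: only a PMD_SIZE hstate gets the split PMD lock, while a CONT-PMD
hstate (32M here) falls back to mm->page_table_lock. A simplified
sketch of the existing helpers, paraphrased from
include/linux/hugetlb.h rather than introduced by this patch:

	static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
						   struct mm_struct *mm, pte_t *pte)
	{
		/* Only a real PMD_SIZE hstate may use the split PMD lock. */
		if (huge_page_size(h) == PMD_SIZE)
			return pmd_lockptr(mm, (pmd_t *) pte);
		VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
		return &mm->page_table_lock;
	}

	static inline spinlock_t *huge_pte_lock(struct hstate *h,
						struct mm_struct *mm, pte_t *pte)
	{
		spinlock_t *ptl;

		ptl = huge_pte_lockptr(h, mm, pte);
		spin_lock(ptl);
		return ptl;
	}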
Signed-off-by: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
---
 include/linux/hugetlb.h | 4 ++--
 mm/gup.c                | 2 +-
 mm/hugetlb.c            | 7 ++++---
 3 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 4b172a7..3a96f67 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -209,7 +209,7 @@ struct page *follow_huge_pd(struct vm_area_struct *vma,
 			     int flags, int pdshift);
 struct page *follow_huge_pte(struct vm_area_struct *vma, unsigned long address,
 			     pmd_t *pmd, int flags);
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+struct page *follow_huge_pmd(struct vm_area_struct *vma, unsigned long address,
 			     pmd_t *pmd, int flags);
 struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
 			     pud_t *pud, int flags);
@@ -320,7 +320,7 @@ static inline struct page *follow_huge_pte(struct vm_area_struct *vma,
 	return NULL;
 }
 
-static inline struct page *follow_huge_pmd(struct mm_struct *mm,
+static inline struct page *follow_huge_pmd(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmd, int flags)
 {
 	return NULL;
diff --git a/mm/gup.c b/mm/gup.c
index 87a94f5..014accd 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -673,7 +673,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 	if (pmd_none(pmdval))
 		return no_page_table(vma, flags);
 	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
-		page = follow_huge_pmd(mm, address, pmd, flags);
+		page = follow_huge_pmd(vma, address, pmd, flags);
 		if (page)
 			return page;
 		return no_page_table(vma, flags);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cf742d1..2c4048a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -7035,9 +7035,11 @@ struct page * __weak
 }
 
 struct page * __weak
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+follow_huge_pmd(struct vm_area_struct *vma, unsigned long address,
 		pmd_t *pmd, int flags)
 {
+	struct mm_struct *mm = vma->vm_mm;
+	struct hstate *hstate = hstate_vma(vma);
 	struct page *page = NULL;
 	spinlock_t *ptl;
 	pte_t pte;
@@ -7050,8 +7052,7 @@ struct page * __weak
 		return NULL;
 
 retry:
-	ptl = pmd_lockptr(mm, pmd);
-	spin_lock(ptl);
+	ptl = huge_pte_lock(hstate, mm, (pte_t *)pmd);
 	/*
 	 * make sure that the address range covered by this pmd is not
 	 * unmapped from other threads.
--
1.8.3.1