[PATCH RFC 07/10] mm/hugetlb: Make hugetlb_follow_page_mask() RCU-safe
From: Peter Xu
Date: Sun Oct 30 2022 - 17:31:33 EST
Take the RCU read lock around the huge_pte_offset() lookup: RCU makes sure
the pte_t* returned won't go away from under us. Please refer to the
comment above huge_pte_offset() for more information.
Signed-off-by: Peter Xu <peterx@xxxxxxxxxx>
---
mm/hugetlb.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9869c12e6460..85214095fb85 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6229,10 +6229,12 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
if (WARN_ON_ONCE(flags & FOLL_PIN))
return NULL;
+ /* For huge_pte_offset() */
+ rcu_read_lock();
retry:
pte = huge_pte_offset(mm, haddr, huge_page_size(h));
if (!pte)
- return NULL;
+ goto out_rcu;
ptl = huge_pte_lock(h, mm, pte);
entry = huge_ptep_get(pte);
@@ -6266,6 +6268,8 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
}
out:
spin_unlock(ptl);
+out_rcu:
+ rcu_read_unlock();
return page;
}
--
2.37.3