[PATCH RFC 06/10] mm/hugetlb: Make page_vma_mapped_walk() RCU-safe
From: Peter Xu
Date: Sun Oct 30 2022 - 17:31:37 EST
Taking the RCU read lock around huge_pte_offset() makes sure the pte_t*
it returns won't be freed from under us while we walk and lock it.
Please refer to the comment above huge_pte_offset() for more information.
Signed-off-by: Peter Xu <peterx@xxxxxxxxxx>
---
include/linux/rmap.h | 3 +++
mm/page_vma_mapped.c | 7 ++++++-
2 files changed, 9 insertions(+), 1 deletion(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bd3504d11b15..d2c5e69a56f2 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -408,6 +408,9 @@ static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
pte_unmap(pvmw->pte);
if (pvmw->ptl)
spin_unlock(pvmw->ptl);
+ /* Hugetlb uses RCU lock for safe access of huge_pte_offset() */
+ if (is_vm_hugetlb_page(pvmw->vma))
+ rcu_read_unlock();
}
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 93e13fc17d3c..513210a59d7b 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -169,10 +169,15 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
if (pvmw->pte)
return not_found(pvmw);
+ /* For huge_pte_offset() */
+ rcu_read_lock();
+
/* when pud is not present, pte will be NULL */
pvmw->pte = huge_pte_offset(mm, pvmw->address, size);
- if (!pvmw->pte)
+ if (!pvmw->pte) {
+ rcu_read_unlock();
return false;
+ }
pvmw->ptl = huge_pte_lock(hstate, mm, pvmw->pte);
if (!check_pte(pvmw))
--
2.37.3