[PATCH v3 11/15] mm/memory: factor out copying the actual PTE in copy_present_pte()
From: David Hildenbrand
Date: Mon Jan 29 2024 - 07:52:57 EST
Let's prepare for further changes by factoring out copying of the actual PTE in copy_present_pte() into a new helper, __copy_present_pte().
Reviewed-by: Ryan Roberts <ryan.roberts@xxxxxxx>
Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
---
mm/memory.c | 63 ++++++++++++++++++++++++++++-------------------------
1 file changed, 33 insertions(+), 30 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 8d14ba440929..a3bdb25f4c8d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -930,6 +930,29 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
return 0;
}
+static inline void __copy_present_pte(struct vm_area_struct *dst_vma,
+ struct vm_area_struct *src_vma, pte_t *dst_pte, pte_t *src_pte,
+ pte_t pte, unsigned long addr)
+{
+ struct mm_struct *src_mm = src_vma->vm_mm;
+
+ /* If it's a COW mapping, write protect it in both processes. */
+ if (is_cow_mapping(src_vma->vm_flags) && pte_write(pte)) {
+ ptep_set_wrprotect(src_mm, addr, src_pte);
+ pte = pte_wrprotect(pte);
+ }
+
+ /* If it's a shared mapping, mark it clean in the child. */
+ if (src_vma->vm_flags & VM_SHARED)
+ pte = pte_mkclean(pte);
+ pte = pte_mkold(pte);
+
+ if (!userfaultfd_wp(dst_vma))
+ pte = pte_clear_uffd_wp(pte);
+
+ set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
+}
+
/*
* Copy one pte. Returns 0 if succeeded, or -EAGAIN if one preallocated page
* is required to copy this pte.
@@ -939,23 +962,23 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
struct folio **prealloc)
{
- struct mm_struct *src_mm = src_vma->vm_mm;
- unsigned long vm_flags = src_vma->vm_flags;
pte_t pte = ptep_get(src_pte);
struct page *page;
struct folio *folio;
page = vm_normal_page(src_vma, addr, pte);
- if (page)
- folio = page_folio(page);
- if (page && folio_test_anon(folio)) {
+ if (unlikely(!page))
+ goto copy_pte;
+
+ folio = page_folio(page);
+ folio_get(folio);
+ if (folio_test_anon(folio)) {
/*
* If this page may have been pinned by the parent process,
* copy the page immediately for the child so that we'll always
* guarantee the pinned page won't be randomly replaced in the
* future.
*/
- folio_get(folio);
if (unlikely(folio_try_dup_anon_rmap_pte(folio, page, src_vma))) {
/* Page may be pinned, we have to copy. */
folio_put(folio);
@@ -963,34 +986,14 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
return copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
addr, rss, prealloc, page);
}
rss[MM_ANONPAGES]++;
- } else if (page) {
- folio_get(folio);
+ VM_WARN_ON_FOLIO(PageAnonExclusive(page), folio);
+ } else {
folio_dup_file_rmap_pte(folio, page);
rss[mm_counter_file(folio)]++;
}
- /*
- * If it's a COW mapping, write protect it both
- * in the parent and the child
- */
- if (is_cow_mapping(vm_flags) && pte_write(pte)) {
- ptep_set_wrprotect(src_mm, addr, src_pte);
- pte = pte_wrprotect(pte);
- }
- VM_BUG_ON(page && folio_test_anon(folio) && PageAnonExclusive(page));
-
- /*
- * If it's a shared mapping, mark it clean in
- * the child
- */
- if (vm_flags & VM_SHARED)
- pte = pte_mkclean(pte);
- pte = pte_mkold(pte);
-
- if (!userfaultfd_wp(dst_vma))
- pte = pte_clear_uffd_wp(pte);
-
- set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
+copy_pte:
+ __copy_present_pte(dst_vma, src_vma, dst_pte, src_pte, pte, addr);
return 0;
}
--
2.43.0
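
As background (not part of the patch): the write protection that __copy_present_pte() applies to both the parent and the child PTE is what preserves ordinary copy-on-write semantics for private mappings across fork(). A minimal userspace sketch of that behavior, using only standard mmap()/fork() and illustrative names:

/*
 * Illustrative userspace demo, not kernel code: after fork(), parent and
 * child initially share the private page with write-protected PTEs; the
 * first write in either process faults and receives a private copy, so
 * the other process never observes the modification.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	strcpy(p, "parent data");

	pid_t pid = fork();
	if (pid < 0) {
		perror("fork");
		return 1;
	}
	if (pid == 0) {
		strcpy(p, "child data");	/* write fault triggers COW */
		printf("child sees:  %s\n", p);
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	printf("parent sees: %s\n", p);		/* still "parent data" */
	return 0;
}

Built with a stock toolchain (e.g. cc cow_demo.c, file name chosen for illustration), the parent still prints "parent data" after the child exits; the parent-side ptep_set_wrprotect() and child-side pte_wrprotect() pair in the helper is what keeps that fault-and-copy path intact.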