[PATCH v1 4/9] mm/memory: factor out zapping folio pte into zap_present_folio_pte()
From: David Hildenbrand
Date: Mon Jan 29 2024 - 09:34:09 EST

Let's prepare for further changes by factoring out the zapping of a
present folio's PTE into a separate function, zap_present_folio_pte().

Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
---
 mm/memory.c | 53 ++++++++++++++++++++++++++++++++---------------------
 1 file changed, 32 insertions(+), 21 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 20bc13ab8db2..a2190d7cfa74 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1528,30 +1528,14 @@ zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
 	pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
 }
 
-static inline void zap_present_pte(struct mmu_gather *tlb,
-		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
-		unsigned long addr, struct zap_details *details,
-		int *rss, bool *force_flush, bool *force_break)
+static inline void zap_present_folio_pte(struct mmu_gather *tlb,
+		struct vm_area_struct *vma, struct folio *folio,
+		struct page *page, pte_t *pte, pte_t ptent, unsigned long addr,
+		struct zap_details *details, int *rss, bool *force_flush,
+		bool *force_break)
 {
 	struct mm_struct *mm = tlb->mm;
 	bool delay_rmap = false;
-	struct folio *folio;
-	struct page *page;
-
-	page = vm_normal_page(vma, addr, ptent);
-	if (!page) {
-		/* We don't need up-to-date accessed/dirty bits. */
-		ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
-		arch_check_zapped_pte(vma, ptent);
-		tlb_remove_tlb_entry(tlb, pte, addr);
-		VM_WARN_ON_ONCE(userfaultfd_wp(vma));
-		ksm_might_unmap_zero_page(mm, ptent);
-		return;
-	}
-
-	folio = page_folio(page);
-	if (unlikely(!should_zap_folio(details, folio)))
-		return;
 
 	if (!folio_test_anon(folio)) {
 		ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
@@ -1586,6 +1570,33 @@ static inline void zap_present_pte(struct mmu_gather *tlb,
 	}
 }
 
+static inline void zap_present_pte(struct mmu_gather *tlb,
+		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
+		unsigned long addr, struct zap_details *details,
+		int *rss, bool *force_flush, bool *force_break)
+{
+	struct mm_struct *mm = tlb->mm;
+	struct folio *folio;
+	struct page *page;
+
+	page = vm_normal_page(vma, addr, ptent);
+	if (!page) {
+		/* We don't need up-to-date accessed/dirty bits. */
+		ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
+		arch_check_zapped_pte(vma, ptent);
+		tlb_remove_tlb_entry(tlb, pte, addr);
+		VM_WARN_ON_ONCE(userfaultfd_wp(vma));
+		ksm_might_unmap_zero_page(mm, ptent);
+		return;
+	}
+
+	folio = page_folio(page);
+	if (unlikely(!should_zap_folio(details, folio)))
+		return;
+	zap_present_folio_pte(tlb, vma, folio, page, pte, ptent, addr, details,
+			      rss, force_flush, force_break);
+}
+
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end,
--
2.43.0
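
For reference, the split is easiest to see in the resulting
zap_present_pte(), reproduced below with explanatory comments added.
This is only a condensed restatement of the diff above, not new or
standalone code: every identifier is an existing mm/memory.c internal
shown in the patch, and the added comments reflect my reading of the
two paths.

static inline void zap_present_pte(struct mmu_gather *tlb,
		struct vm_area_struct *vma, pte_t *pte, pte_t ptent,
		unsigned long addr, struct zap_details *details,
		int *rss, bool *force_flush, bool *force_break)
{
	struct mm_struct *mm = tlb->mm;
	struct folio *folio;
	struct page *page;

	page = vm_normal_page(vma, addr, ptent);
	if (!page) {
		/*
		 * No struct page backs this PTE (e.g., the shared
		 * zeropage): clear it directly, with no folio
		 * accounting or rmap work required.
		 */
		ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
		arch_check_zapped_pte(vma, ptent);
		tlb_remove_tlb_entry(tlb, pte, addr);
		VM_WARN_ON_ONCE(userfaultfd_wp(vma));
		ksm_might_unmap_zero_page(mm, ptent);
		return;
	}

	folio = page_folio(page);
	if (unlikely(!should_zap_folio(details, folio)))
		return;

	/* All folio-backed zapping now lives in the new helper. */
	zap_present_folio_pte(tlb, vma, folio, page, pte, ptent, addr,
			      details, rss, force_flush, force_break);
}

With this split, zap_present_pte() handles only the page-less case and
the should-we-zap check, while zap_present_folio_pte() receives the
already-resolved folio and page, which keeps each function focused for
the changes later in this series.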