[PATCH 2/2] mm/page_vma_mapped: page table boundary is already guaranteed
From: Wei Yang
Date: Wed Nov 27 2019 - 20:03:50 EST
The check here is meant to guarantee that the pvmw->address iteration
stays within a single page table. To be specific, the address range
should fall within one PMD_SIZE.
If my understanding is correct, this is already ensured by the
preceding check:
    address >= __vma_address(page, vma) + PMD_SIZE
The boundary check here therefore seems unnecessary.
Signed-off-by: Wei Yang <richardw.yang@xxxxxxxxxxxxxxx>
---
Test:
More than 48 hours of kernel build testing shows this code path is
never exercised.
---
mm/page_vma_mapped.c | 13 +------------
1 file changed, 1 insertion(+), 12 deletions(-)
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 76e03650a3ab..25aada8a1271 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -163,7 +163,6 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
return not_found(pvmw);
return true;
}
-restart:
pgd = pgd_offset(mm, pvmw->address);
if (!pgd_present(*pgd))
return false;
@@ -225,17 +224,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
__vma_address(pvmw->page, pvmw->vma) +
PMD_SIZE)
return not_found(pvmw);
- /* Did we cross page table boundary? */
- if (pvmw->address % PMD_SIZE == 0) {
- pte_unmap(pvmw->pte);
- if (pvmw->ptl) {
- spin_unlock(pvmw->ptl);
- pvmw->ptl = NULL;
- }
- goto restart;
- } else {
- pvmw->pte++;
- }
+ pvmw->pte++;
} while (pte_none(*pvmw->pte));
if (!pvmw->ptl) {
--
2.17.1