[PATCH] tlb/hugetlb: Add framework to handle PGDIR_SIZE HugeTLB pages
From: Anshuman Khandual
Date: Wed Apr 13 2022 - 06:06:55 EST
Change tlb_remove_huge_tlb_entry() to accommodate larger PGDIR_SIZE HugeTLB
pages by adding a new helper tlb_flush_pgd_range(). While here, also update
struct mmu_gather as required, i.e. add a new member, cleared_pgds.
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@xxxxxxxxxxxxx>
Cc: Nick Piggin <npiggin@xxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: linux-arch@xxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Signed-off-by: Anshuman Khandual <anshuman.khandual@xxxxxxx>
---
This applies on v5.18-rc2; some earlier context can be found here:
https://lore.kernel.org/all/20220406112124.GD2731@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/
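For illustration only (not part of the patch), here is a rough sketch of how
an architecture specific tlb_flush() could consume the new cleared_pgds
tracking via tlb_get_unmap_shift() to size its invalidation stride. The
my_arch_flush_tlb_mm()/my_arch_flush_tlb_range() helpers are hypothetical and
just stand in for whatever the architecture actually provides.

static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		/* Hypothetical arch helper flushing the whole mm */
		my_arch_flush_tlb_mm(tlb->mm);
		return;
	}

	/*
	 * With cleared_pgds tracked, tlb_get_unmap_shift() returns
	 * PGDIR_SHIFT after a PGDIR_SIZE HugeTLB entry has been cleared,
	 * so a single stride covers the whole mapping.
	 */
	my_arch_flush_tlb_range(tlb->mm, tlb->start, tlb->end,
				1UL << tlb_get_unmap_shift(tlb));
}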
include/asm-generic/tlb.h | 18 ++++++++++++++++--
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index eee6f7763a39..6eaf0080ef2d 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -282,6 +282,7 @@ struct mmu_gather {
unsigned int cleared_pmds : 1;
unsigned int cleared_puds : 1;
unsigned int cleared_p4ds : 1;
+ unsigned int cleared_pgds : 1;
/*
* tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
@@ -325,6 +326,7 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
tlb->cleared_pmds = 0;
tlb->cleared_puds = 0;
tlb->cleared_p4ds = 0;
+ tlb->cleared_pgds = 0;
/*
* Do not reset mmu_gather::vma_* fields here, we do not
* call into tlb_start_vma() again to set them if there is an
@@ -420,7 +422,7 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
* these bits.
*/
if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
- tlb->cleared_puds || tlb->cleared_p4ds))
+ tlb->cleared_puds || tlb->cleared_p4ds || tlb->cleared_pgds))
return;
tlb_flush(tlb);
@@ -472,6 +474,8 @@ static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
return PUD_SHIFT;
if (tlb->cleared_p4ds)
return P4D_SHIFT;
+ if (tlb->cleared_pgds)
+ return PGDIR_SHIFT;
return PAGE_SHIFT;
}
@@ -545,6 +549,14 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
tlb->cleared_p4ds = 1;
}
+static inline void tlb_flush_pgd_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ __tlb_adjust_range(tlb, address, size);
+ tlb->cleared_pgds = 1;
+}
+
+
#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif
@@ -565,7 +577,9 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
do { \
unsigned long _sz = huge_page_size(h); \
- if (_sz >= P4D_SIZE) \
+ if (_sz >= PGDIR_SIZE) \
+ tlb_flush_pgd_range(tlb, address, _sz); \
+ else if (_sz >= P4D_SIZE) \
tlb_flush_p4d_range(tlb, address, _sz); \
else if (_sz >= PUD_SIZE) \
tlb_flush_pud_range(tlb, address, _sz); \
--
2.20.1