Add hugetlb_free_range(), a helper function for freeing the bits of the
page table that map a particular HugeTLB PTE.
Signed-off-by: James Houghton <jthoughton@xxxxxxxxxx>
---
include/linux/hugetlb.h | 2 ++
mm/hugetlb.c | 17 +++++++++++++++++
2 files changed, 19 insertions(+)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 1d4ec9dfdebf..33ba48fac551 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -107,6 +107,8 @@ bool hugetlb_pte_none_mostly(const struct hugetlb_pte *hpte);
pte_t hugetlb_ptep_get(const struct hugetlb_pte *hpte);
void hugetlb_pte_clear(struct mm_struct *mm, const struct hugetlb_pte *hpte,
unsigned long address);
+void hugetlb_free_range(struct mmu_gather *tlb, const struct hugetlb_pte *hpte,
+ unsigned long start, unsigned long end);
struct hugepage_subpool {
spinlock_t lock;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1a1434e29740..a2d2ffa76173 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1120,6 +1120,23 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
return false;
}
+void hugetlb_free_range(struct mmu_gather *tlb, const struct hugetlb_pte *hpte,
+ unsigned long start, unsigned long end)
+{
+ unsigned long floor = start & hugetlb_pte_mask(hpte);
+ unsigned long ceiling = floor + hugetlb_pte_size(hpte);
+
+ if (hugetlb_pte_size(hpte) >= PGDIR_SIZE) {
+ free_p4d_range(tlb, (pgd_t *)hpte->ptep, start, end, floor, ceiling);
+ } else if (hugetlb_pte_size(hpte) >= P4D_SIZE) {
+ free_pud_range(tlb, (p4d_t *)hpte->ptep, start, end, floor, ceiling);
+ } else if (hugetlb_pte_size(hpte) >= PUD_SIZE) {
+ free_pmd_range(tlb, (pud_t *)hpte->ptep, start, end, floor, ceiling);
+ } else if (hugetlb_pte_size(hpte) >= PMD_SIZE) {
+ free_pte_range(tlb, (pmd_t *)hpte->ptep, start);
+ }
+}
+
bool hugetlb_pte_present_leaf(const struct hugetlb_pte *hpte)
{
pgd_t pgd;