[PATCH 19/45] fs/proc: Enable gather_pte_stats to handle hugetlb vmas

From: Oscar Salvador
Date: Thu Jul 04 2024 - 00:35:44 EST


PMD-mapped hugetlb vmas will also reach gather_pte_stats.
Add the required code so it knows how to handle them there.

Signed-off-by: Oscar Salvador <osalvador@xxxxxxx>
---
fs/proc/task_mmu.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 3785a44b97fa..e13754d3246e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -3141,7 +3141,7 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
return page;
}

-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
struct vm_area_struct *vma,
unsigned long addr)
@@ -3176,15 +3176,21 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
pte_t *orig_pte;
pte_t *pte;

-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- ptl = pmd_trans_huge_lock(pmd, vma);
+#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
+ ptl = pmd_huge_lock(pmd, vma);
if (ptl) {
+ unsigned long nr_pages;
struct page *page;

+ if (is_vm_hugetlb_page(vma))
+ nr_pages = 1;
+ else
+ nr_pages = HPAGE_PMD_SIZE / PAGE_SIZE;
+
page = can_gather_numa_stats_pmd(*pmd, vma, addr);
if (page)
gather_stats(page, md, pmd_dirty(*pmd),
- HPAGE_PMD_SIZE/PAGE_SIZE);
+ nr_pages);
spin_unlock(ptl);
return 0;
}
--
2.26.2