[PATCH uprobe, thp v2 5/5] uprobe: collapse THP pmd after removing all uprobes

From: Song Liu
Date: Tue Jun 04 2019 - 12:56:02 EST


After all uprobes are removed from a huge page (which is mapped by a
PTE page table after the pmd was split), it is possible to collapse the
pmd and benefit from THP again. This patch does the collapse.

An issue in an earlier version was discovered by the kbuild test robot.
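
To illustrate, here is a simplified sketch of the effect (pseudo-code,
not the exact kernel API; "pte_table" stands for the PTE page table
that currently maps the huge page):

	/* before: the pmd was split for the uprobe, so the huge page
	 * is mapped by individual PTEs: pte_table[i] maps hpage + i
	 */
	pmd -> pte_table

	/* after try_collapse_huge_pmd(): */
	pmd = none;			/* cleared by pmdp_collapse_flush() */
	pte_free(mm, pte_table);	/* the page table is given back */
	/* the next fault in the range can map hpage with a huge pmd */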

Reported-by: kbuild test robot <lkp@xxxxxxxxx>
Signed-off-by: Song Liu <songliubraving@xxxxxx>
---
 include/linux/huge_mm.h |  7 ++++
 kernel/events/uprobes.c |  4 +++
 mm/huge_memory.c        | 72 ++++++++++++++++++++++++++++++++++++
 3 files changed, 83 insertions(+)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 7cd5c150c21d..b969022dc922 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -250,6 +250,9 @@ static inline bool thp_migration_supported(void)
 	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
 }
 
+extern void try_collapse_huge_pmd(struct vm_area_struct *vma,
+				  struct page *page);
+
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -368,6 +371,10 @@ static inline bool thp_migration_supported(void)
 {
 	return false;
 }
+
+static inline void try_collapse_huge_pmd(struct vm_area_struct *vma,
+					 struct page *page) {}
+
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #endif /* _LINUX_HUGE_MM_H */
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 88a8e1624bfa..0c8e2358dbf5 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -537,6 +537,10 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 	if (ret && is_register && ref_ctr_updated)
 		update_ref_ctr(uprobe, mm, -1);
 
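+	/* the original compound page is mapped again: try to collapse the pmd */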
+	if (!ret && orig_page && PageTransCompound(orig_page))
+		try_collapse_huge_pmd(vma, orig_page);
+
 	return ret;
 }

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9f8bce9a6b32..03855a480fd2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2886,6 +2886,78 @@ static struct shrinker deferred_split_shrinker = {
 	.flags = SHRINKER_NUMA_AWARE,
 };

+/**
+ * try_collapse_huge_pmd - try to collapse the pmd for a PTE-mapped huge page
+ * @vma: vma containing the huge page
+ * @page: any subpage of the huge page
+ */
+void try_collapse_huge_pmd(struct vm_area_struct *vma,
+			   struct page *page)
+{
+	struct page *hpage = compound_head(page);
+	struct mm_struct *mm = vma->vm_mm;
+	struct mmu_notifier_range range;
+	unsigned long haddr;
+	unsigned long addr;
+	pmd_t *pmd, _pmd;
+	int i, count = 0;
+	spinlock_t *ptl;
+
+	VM_BUG_ON_PAGE(!PageCompound(page), page);
+
+	haddr = page_address_in_vma(hpage, vma);
+	pmd = mm_find_pmd(mm, haddr);
+	if (!pmd)
+		return;
+
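+	/* hold the pmd lock across the PTE scan and the pmd clear */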
+	ptl = pmd_lock(mm, pmd);
+
+	/* step 1: every mapped PTE must point to the matching subpage */
+	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
+		pte_t *pte = pte_offset_map(pmd, addr);
+
+		if (pte_none(*pte))
+			continue;
+		if (hpage + i != vm_normal_page(vma, addr, *pte)) {
+			spin_unlock(ptl);
+			return;
+		}
+		count++;
+	}
+
+	/* step 2: drop rmap and page reference of each mapped subpage */
+	for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
+		pte_t *pte = pte_offset_map(pmd, addr);
+		struct page *p;
+
+		if (pte_none(*pte))
+			continue;
+		p = vm_normal_page(vma, addr, *pte);
+		lock_page(p);
+		page_remove_rmap(p, false);
+		unlock_page(p);
+		put_page(p);
+	}
+
+	/* step 3: clear the pmd, flush the TLB, notify secondary MMUs */
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
+				haddr, haddr + HPAGE_PMD_SIZE);
+	mmu_notifier_invalidate_range_start(&range);
+
+	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
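+	/* _pmd holds the old pmd entry; its PTE table is freed below */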
+	spin_unlock(ptl);
+	mmu_notifier_invalidate_range_end(&range);
+
+	/* step 4: free the PTE page table and fix up memory counters */
+	mm_dec_nr_ptes(mm);
+	pte_free(mm, pmd_pgtable(_pmd));
+	add_mm_counter(mm,
+		       shmem_file(vma->vm_file) ? MM_SHMEMPAGES : MM_FILEPAGES,
+		       -count);
+}
+
 #ifdef CONFIG_DEBUG_FS
 static int split_huge_pages_set(void *data, u64 val)
 {
--
2.17.1