[PATCH] mm/khugepaged: factor out max_ptes_{none,shared,swap} limit helpers
From: David Hildenbrand (Arm)
Date: Thu Mar 12 2026 - 16:54:22 EST
Signed-off-by: David Hildenbrand (Arm) <david@xxxxxxxxxx>
---
mm/khugepaged.c | 89 +++++++++++++++++++++++++++++--------------------
1 file changed, 53 insertions(+), 36 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index b7b4680d27ab..6a3773bfa0a2 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -318,6 +318,34 @@ static ssize_t max_ptes_shared_store(struct kobject *kobj,
return count;
}
+/*
+ * Return how many pte_none/zero-page entries are tolerated in the range
+ * before collapse is refused. @vma may be NULL when no VMA applies
+ * (file/shmem scan); the userfaultfd check is then skipped.
+ */
+static int collapse_max_ptes_none(struct collapse_control *cc,
+ struct vm_area_struct *vma)
+{
+ /* We don't mess with MISSING faults. */
+ if (vma && userfaultfd_armed(vma))
+ return 0;
+ /* MADV_COLLAPSE always collapses. */
+ if (!cc->is_khugepaged)
+ return HPAGE_PMD_NR;
+ return khugepaged_max_ptes_none;
+}
+
+/*
+ * Return how many shared (maybe-mapped-elsewhere) PTEs are tolerated in
+ * the range before collapse is refused; tunable only for khugepaged.
+ */
+static int collapse_max_ptes_shared(struct collapse_control *cc)
+{
+ /* MADV_COLLAPSE always collapses. */
+ if (!cc->is_khugepaged)
+ return HPAGE_PMD_NR;
+ return khugepaged_max_ptes_shared;
+}
+
+/*
+ * Return how many swapped-out (non-present) PTEs are tolerated in the
+ * range before collapse is refused; tunable only for khugepaged.
+ */
+static int collapse_max_ptes_swap(struct collapse_control *cc)
+{
+ /* MADV_COLLAPSE always collapses. */
+ if (!cc->is_khugepaged)
+ return HPAGE_PMD_NR;
+ return khugepaged_max_ptes_swap;
+}
+
static struct kobj_attribute khugepaged_max_ptes_shared_attr =
__ATTR_RW(max_ptes_shared);
@@ -539,6 +567,8 @@ static enum scan_result __collapse_huge_page_isolate(struct vm_area_struct *vma,
unsigned long start_addr, pte_t *pte, struct collapse_control *cc,
struct list_head *compound_pagelist)
{
+ const int max_ptes_none = collapse_max_ptes_none(cc, vma);
+ const int max_ptes_shared = collapse_max_ptes_shared(cc);
struct page *page = NULL;
struct folio *folio = NULL;
unsigned long addr = start_addr;
@@ -550,16 +580,12 @@ static enum scan_result __collapse_huge_page_isolate(struct vm_area_struct *vma,
_pte++, addr += PAGE_SIZE) {
pte_t pteval = ptep_get(_pte);
if (pte_none_or_zero(pteval)) {
- ++none_or_zero;
- if (!userfaultfd_armed(vma) &&
- (!cc->is_khugepaged ||
- none_or_zero <= khugepaged_max_ptes_none)) {
- continue;
- } else {
+ if (++none_or_zero > max_ptes_none) {
result = SCAN_EXCEED_NONE_PTE;
count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
goto out;
}
+ continue;
}
if (!pte_present(pteval)) {
result = SCAN_PTE_NON_PRESENT;
@@ -586,9 +612,7 @@ static enum scan_result __collapse_huge_page_isolate(struct vm_area_struct *vma,
/* See hpage_collapse_scan_pmd(). */
if (folio_maybe_mapped_shared(folio)) {
- ++shared;
- if (cc->is_khugepaged &&
- shared > khugepaged_max_ptes_shared) {
+ if (++shared > max_ptes_shared) {
result = SCAN_EXCEED_SHARED_PTE;
count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
goto out;
@@ -1247,6 +1271,9 @@ static enum scan_result hpage_collapse_scan_pmd(struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long start_addr,
bool *mmap_locked, struct collapse_control *cc)
{
+ const int max_ptes_none = collapse_max_ptes_none(cc, vma);
+ const int max_ptes_swap = collapse_max_ptes_swap(cc);
+ const int max_ptes_shared = collapse_max_ptes_shared(cc);
pmd_t *pmd;
pte_t *pte, *_pte;
int none_or_zero = 0, shared = 0, referenced = 0;
@@ -1280,36 +1307,28 @@ static enum scan_result hpage_collapse_scan_pmd(struct mm_struct *mm,
pte_t pteval = ptep_get(_pte);
if (pte_none_or_zero(pteval)) {
- ++none_or_zero;
- if (!userfaultfd_armed(vma) &&
- (!cc->is_khugepaged ||
- none_or_zero <= khugepaged_max_ptes_none)) {
- continue;
- } else {
+ if (++none_or_zero > max_ptes_none) {
result = SCAN_EXCEED_NONE_PTE;
count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
goto out_unmap;
}
+ continue;
}
if (!pte_present(pteval)) {
- ++unmapped;
- if (!cc->is_khugepaged ||
- unmapped <= khugepaged_max_ptes_swap) {
- /*
- * Always be strict with uffd-wp
- * enabled swap entries. Please see
- * comment below for pte_uffd_wp().
- */
- if (pte_swp_uffd_wp_any(pteval)) {
- result = SCAN_PTE_UFFD_WP;
- goto out_unmap;
- }
- continue;
- } else {
+ if (++unmapped > max_ptes_swap) {
result = SCAN_EXCEED_SWAP_PTE;
count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
goto out_unmap;
}
+ /*
+ * Always be strict with uffd-wp enabled swap entries.
+ * See the comment below for pte_uffd_wp().
+ */
+ if (pte_swp_uffd_wp_any(pteval)) {
+ result = SCAN_PTE_UFFD_WP;
+ goto out_unmap;
+ }
+ continue;
}
if (pte_uffd_wp(pteval)) {
/*
@@ -1348,9 +1367,7 @@ static enum scan_result hpage_collapse_scan_pmd(struct mm_struct *mm,
* is shared.
*/
if (folio_maybe_mapped_shared(folio)) {
- ++shared;
- if (cc->is_khugepaged &&
- shared > khugepaged_max_ptes_shared) {
+ if (++shared > max_ptes_shared) {
result = SCAN_EXCEED_SHARED_PTE;
count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
goto out_unmap;
@@ -2305,6 +2322,8 @@ static enum scan_result hpage_collapse_scan_file(struct mm_struct *mm,
unsigned long addr, struct file *file, pgoff_t start,
struct collapse_control *cc)
{
+ const int max_ptes_none = collapse_max_ptes_none(cc, NULL);
+ const int max_ptes_swap = collapse_max_ptes_swap(cc);
struct folio *folio = NULL;
struct address_space *mapping = file->f_mapping;
XA_STATE(xas, &mapping->i_pages, start);
@@ -2323,8 +2342,7 @@ static enum scan_result hpage_collapse_scan_file(struct mm_struct *mm,
if (xa_is_value(folio)) {
swap += 1 << xas_get_order(&xas);
- if (cc->is_khugepaged &&
- swap > khugepaged_max_ptes_swap) {
+ if (swap > max_ptes_swap) {
result = SCAN_EXCEED_SWAP_PTE;
count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
break;
@@ -2395,8 +2413,7 @@ static enum scan_result hpage_collapse_scan_file(struct mm_struct *mm,
cc->progress += HPAGE_PMD_NR;
if (result == SCAN_SUCCEED) {
- if (cc->is_khugepaged &&
- present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
+ if (present < HPAGE_PMD_NR - max_ptes_none) {
result = SCAN_EXCEED_NONE_PTE;
count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
} else {
--
2.43.0
Then extend it in this patch by passing an order and adding a return-value check. You
can then squash the changes from patch #4 directly into this one.
--
Cheers,
David