Re: [PATCH v3 09/10] mm: thp: always enable mTHP support
From: Luiz Capitulino
Date: Mon Apr 13 2026 - 11:40:09 EST
On 2026-04-11 03:22, Baolin Wang wrote:
On 4/9/26 4:23 AM, Luiz Capitulino wrote:
If PMD-sized pages are not supported on an architecture (i.e. the
arch implements arch_has_pmd_leaves() and it returns false) then the
current code disables all THP, including mTHP.

Sorry, I still don't like the changes here, because shmem_allowable_huge_orders() is meant to determine which large orders are allowed. Something like below (untested):
This commit fixes this by allowing mTHP to be always enabled for all
archs. When PMD-sized pages are not supported, its sysfs entry won't be
created and their mapping will be disallowed at page-fault time.
Similarly, this commit implements the following changes for shmem:
- In shmem_allowable_huge_orders(): drop the pgtable_has_pmd_leaves()
check so that mTHP sizes are considered
- In shmem_alloc_and_add_folio(): don't consider PMD and PUD orders
when PMD-sized pages are not supported by the CPU
Signed-off-by: Luiz Capitulino <luizcap@xxxxxxxxxx>
---
mm/huge_memory.c | 13 ++++++++-----
mm/shmem.c | 4 +++-
2 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 86e489c0a150..6de3d8ebc35c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -118,6 +118,9 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
else
supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;
+ if (!pgtable_has_pmd_leaves())
+ supported_orders &= ~(BIT(PMD_ORDER) | BIT(PUD_ORDER));
+
orders &= supported_orders;
if (!orders)
return 0;
@@ -125,7 +128,7 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
if (!vma->vm_mm) /* vdso */
return 0;
- if (!pgtable_has_pmd_leaves() || vma_thp_disabled(vma, vm_flags, forced_collapse))
+ if (vma_thp_disabled(vma, vm_flags, forced_collapse))
return 0;
/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
@@ -787,7 +790,7 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
* disable all other sizes. powerpc's PMD_ORDER isn't a compile-time
* constant so we have to do this here.
*/
- if (!anon_orders_configured)
+ if (!anon_orders_configured && pgtable_has_pmd_leaves())
huge_anon_orders_inherit = BIT(PMD_ORDER);
*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
@@ -809,6 +812,9 @@ static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
}
orders = THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DEFAULT;
+ if (!pgtable_has_pmd_leaves())
+ orders &= ~(BIT(PMD_ORDER) | BIT(PUD_ORDER));
+
order = highest_order(orders);
while (orders) {
thpsize = thpsize_create(order, *hugepage_kobj);
@@ -908,9 +914,6 @@ static int __init hugepage_init(void)
int err;
struct kobject *hugepage_kobj;
- if (!pgtable_has_pmd_leaves())
- return -EINVAL;
-
/*
* hugepages can't be allocated by the buddy allocator
*/
diff --git a/mm/shmem.c b/mm/shmem.c
index 613393eae5a9..b49a30475cb0 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1839,7 +1839,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
vm_flags_t vm_flags = vma ? vma->vm_flags : 0;
unsigned int global_orders;
- if (!pgtable_has_pmd_leaves() || (vma && vma_thp_disabled(vma, vm_flags, shmem_huge_force)))
+ if (vma && vma_thp_disabled(vma, vm_flags, shmem_huge_force))
return 0;
global_orders = shmem_huge_global_enabled(inode, index, write_end,
@@ -1947,6 +1947,8 @@ static struct folio *shmem_alloc_and_add_folio(struct vm_fault *vmf,
if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
orders = 0;
+ else if (!pgtable_has_pmd_leaves())
+ orders &= ~(BIT(PMD_ORDER) | BIT(PUD_ORDER));
Sure, I can do something like this.
@@ -1834,6 +1834,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
struct vm_area_struct *vma, pgoff_t index,
loff_t write_end, bool shmem_huge_force)
{
+ unsigned int filter_orders = pgtable_has_pmd_leaves() ? (BIT(PMD_ORDER) | BIT(PUD_ORDER)) : 0;
unsigned long mask = READ_ONCE(huge_shmem_orders_always);
unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
vm_flags_t vm_flags = vma ? vma->vm_flags : 0;
@@ -1846,7 +1847,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
shmem_huge_force, vma, vm_flags);
/* Tmpfs huge pages allocation */
if (!vma || !vma_is_anon_shmem(vma))
- return global_orders;
+ return global_orders & ~filter_orders;
/*
* Following the 'deny' semantics of the top level, force the huge
@@ -1871,6 +1872,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
if (global_orders > 0)
mask |= READ_ONCE(huge_shmem_orders_inherit);
+ mask &= ~filter_orders;
return THP_ORDERS_ALL_FILE_DEFAULT & mask;
}
Additionally, we need a pgtable_has_pmd_leaves() check before setting huge_shmem_orders_inherit.
@@ -5428,7 +5430,7 @@ void __init shmem_init(void)
* Default to setting PMD-sized THP to inherit the global setting and
* disable all other multi-size THPs.
*/
- if (!shmem_orders_configured)
+ if (!shmem_orders_configured && pgtable_has_pmd_leaves())
huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER);
#endif
return;