[RFC V1 11/16] arm64/mm: Route all pgtable atomics to central helpers

From: Anshuman Khandual

Date: Tue Feb 24 2026 - 00:13:58 EST


Route all cmpxchg() operations performed on various page table entries to a
new ptdesc_cmpxchg_relaxed() helper. Similarly route all xchg() operations
performed on page table entries to a new ptdesc_xchg_relaxed() helper.

Currently these helpers just forward to the same APIs that were previously
called directly, but in future we will change the routing for D128, whose
entries are too wide for the standard APIs.

Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Ryan Roberts <ryan.roberts@xxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: linux-arm-kernel@xxxxxxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Signed-off-by: Anshuman Khandual <anshuman.khandual@xxxxxxx>
---
arch/arm64/include/asm/pgtable.h | 23 +++++++++++++++++------
arch/arm64/mm/fault.c | 2 +-
2 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 42124d2f323d..cf69ce68f951 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -87,6 +87,17 @@ static inline void arch_leave_lazy_mmu_mode(void)
#define ptdesc_get(x) READ_ONCE(x)
#define ptdesc_set(x, val) WRITE_ONCE(x, val)

+static inline ptdesc_t ptdesc_cmpxchg_relaxed(ptdesc_t *ptep, ptdesc_t old,
+ ptdesc_t new)
+{
+ return cmpxchg_relaxed(ptep, old, new);
+}
+
+static inline ptdesc_t ptdesc_xchg_relaxed(ptdesc_t *ptep, ptdesc_t new)
+{
+ return xchg_relaxed(ptep, new);
+}
+
#define pmdp_get pmdp_get
static inline pmd_t pmdp_get(pmd_t *pmdp)
{
@@ -1313,8 +1324,8 @@ static inline int __ptep_test_and_clear_young(struct vm_area_struct *vma,
do {
old_pte = pte;
pte = pte_mkold(pte);
- pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
- pte_val(old_pte), pte_val(pte));
+ pte_val(pte) = ptdesc_cmpxchg_relaxed(&pte_val(*ptep),
+ pte_val(old_pte), pte_val(pte));
} while (pte_val(pte) != pte_val(old_pte));

return pte_young(pte);
@@ -1350,7 +1361,7 @@ static inline pte_t __ptep_get_and_clear_anysz(struct mm_struct *mm,
pte_t *ptep,
unsigned long pgsize)
{
- pte_t pte = __pte(xchg_relaxed(&pte_val(*ptep), 0));
+ pte_t pte = __pte(ptdesc_xchg_relaxed(&pte_val(*ptep), 0));

switch (pgsize) {
case PAGE_SIZE:
@@ -1426,7 +1437,7 @@ static inline void ___ptep_set_wrprotect(struct mm_struct *mm,
do {
old_pte = pte;
pte = pte_wrprotect(pte);
- pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
+ pte_val(pte) = ptdesc_cmpxchg_relaxed(&pte_val(*ptep),
pte_val(old_pte), pte_val(pte));
} while (pte_val(pte) != pte_val(old_pte));
}
@@ -1464,7 +1475,7 @@ static inline void __clear_young_dirty_pte(struct vm_area_struct *vma,
if (flags & CYDP_CLEAR_DIRTY)
pte = pte_mkclean(pte);

- pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
+ pte_val(pte) = ptdesc_cmpxchg_relaxed(&pte_val(*ptep),
pte_val(old_pte), pte_val(pte));
} while (pte_val(pte) != pte_val(old_pte));
}
@@ -1503,7 +1514,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
- return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
+ return __pmd(ptdesc_xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd)));
}
#endif

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 7bb14765a98d..21964a387bf8 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -230,7 +230,7 @@ int __ptep_set_access_flags(struct vm_area_struct *vma,
pteval ^= PTE_RDONLY;
pteval |= pte_val(entry);
pteval ^= PTE_RDONLY;
- pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
+ pteval = ptdesc_cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
} while (pteval != old_pteval);

/*
--
2.43.0