[PATCH v3 04/13] arm64: mm: Push __TLBI_VADDR() into __tlbi_level()

From: Ryan Roberts

Date: Mon Mar 02 2026 - 08:59:58 EST


From: Will Deacon <will@xxxxxxxxxx>

The __TLBI_VADDR() macro takes an ASID and an address and converts them
into a single argument formatted correctly for a TLB invalidation
instruction.
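
For reference, the macro shifts the page-aligned VA into bits [43:0]
of the operand and places the ASID in bits [63:48]. A sketch of its
definition in tlbflush.h (shown for context only, not part of this
patch):

	#define __TLBI_VADDR(addr, asid)				\
		({							\
			unsigned long __ta = (addr) >> 12;		\
			__ta &= GENMASK_ULL(43, 0);			\
			__ta |= (unsigned long)(asid) << 48;		\
			__ta;						\
		})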

Rather than have callers worry about this (especially in the case where
the ASID is zero), push the macro down into __tlbi_level() via a new
__tlbi_level_asid() helper. Since the address is now shifted inside the
helper, the open-coded "ipa >>= 12" pre-shifts at the KVM call sites
are dropped, avoiding a double shift of the operand.
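
The effect at a typical call site (illustrative, mirroring the hunks
below):

	/* Before: the caller formats the operand itself. */
	__tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);

	/*
	 * After: __tlbi_level() formats the operand; callers that need
	 * a non-zero ASID use the new helper directly.
	 */
	__tlbi_level(vale2is, addr, level);
	__tlbi_level_asid(op, start, tlb_level, asid);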

Signed-off-by: Will Deacon <will@xxxxxxxxxx>
Reviewed-by: Linu Cherian <linu.cherian@xxxxxxx>
Reviewed-by: Jonathan Cameron <jonathan.cameron@xxxxxxxxxx>
Signed-off-by: Ryan Roberts <ryan.roberts@xxxxxxx>
---
arch/arm64/include/asm/tlbflush.h | 14 ++++++++++----
arch/arm64/kernel/sys_compat.c    |  2 +-
arch/arm64/kvm/hyp/nvhe/mm.c      |  2 +-
arch/arm64/kvm/hyp/nvhe/tlb.c     |  2 --
arch/arm64/kvm/hyp/pgtable.c      |  4 ++--
arch/arm64/kvm/hyp/vhe/tlb.c      |  2 --
6 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index e586d9b71ea2d..2832305606b72 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -142,9 +142,10 @@ static __always_inline void ipas2e1is(u64 arg)
__tlbi(ipas2e1is, arg);
}

-static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
+static __always_inline void __tlbi_level_asid(tlbi_op op, u64 addr, u32 level,
+ u16 asid)
{
- u64 arg = addr;
+ u64 arg = __TLBI_VADDR(addr, asid);

if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) && level <= 3) {
u64 ttl = level | (get_trans_granule() << 2);
@@ -155,6 +156,11 @@ static __always_inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
op(arg);
}

+static inline void __tlbi_level(tlbi_op op, u64 addr, u32 level)
+{
+ __tlbi_level_asid(op, addr, level, 0);
+}
+
/*
* This macro creates a properly formatted VA operand for the TLB RANGE. The
* value bit assignments are:
@@ -511,8 +517,7 @@ do { \
if (!system_supports_tlb_range() || \
__flush_pages == 1 || \
(lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) { \
- addr = __TLBI_VADDR(__flush_start, asid); \
- __tlbi_level(op, addr, tlb_level); \
+ __tlbi_level_asid(op, __flush_start, tlb_level, asid); \
__flush_start += stride; \
__flush_pages -= stride >> PAGE_SHIFT; \
continue; \
@@ -685,6 +690,7 @@ static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
#define huge_pmd_needs_flush huge_pmd_needs_flush

#undef __tlbi_user
+#undef __TLBI_VADDR
#endif

#endif
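
(Note: __TLBI_VADDR is now #undef'd at the end of the header, making it
private to tlbflush.h; the remaining hunks convert its last users
outside this file.)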
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index b9d4998c97efa..7e9860143add8 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -36,7 +36,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
* The workaround requires an inner-shareable tlbi.
* We pick the reserved-ASID to minimise the impact.
*/
- __tlbi(aside1is, __TLBI_VADDR(0, 0));
+ __tlbi(aside1is, 0UL);
__tlbi_sync_s1ish();
}
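
(Behaviour is preserved here: __TLBI_VADDR(0, 0) expands to
((0 >> 12) & GENMASK_ULL(43, 0)) | ((unsigned long)0 << 48), i.e. 0,
so passing 0UL directly is equivalent now that the macro is no longer
available outside tlbflush.h.)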

diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index 218976287d3fe..4d8fcc7a3a41e 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -270,7 +270,7 @@ static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
* https://lore.kernel.org/kvm/20221017115209.2099-1-will@xxxxxxxxxx/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
*/
dsb(ishst);
- __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
+ __tlbi_level(vale2is, addr, level);
__tlbi_sync_s1ish_hyp();
isb();
}
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index 3dc1ce0d27fe6..b29140995d484 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -158,7 +158,6 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
* Instead, we invalidate Stage-2 for this IPA, and the
* whole of Stage-1. Weep...
*/
- ipa >>= 12;
__tlbi_level(ipas2e1is, ipa, level);

/*
@@ -188,7 +187,6 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
* Instead, we invalidate Stage-2 for this IPA, and the
* whole of Stage-1. Weep...
*/
- ipa >>= 12;
__tlbi_level(ipas2e1, ipa, level);

/*
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 9b480f947da26..30226f2d5564a 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -490,14 +490,14 @@ static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,

kvm_clear_pte(ctx->ptep);
dsb(ishst);
- __tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), TLBI_TTL_UNKNOWN);
+ __tlbi_level(vae2is, ctx->addr, TLBI_TTL_UNKNOWN);
} else {
if (ctx->end - ctx->addr < granule)
return -EINVAL;

kvm_clear_pte(ctx->ptep);
dsb(ishst);
- __tlbi_level(vale2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
+ __tlbi_level(vale2is, ctx->addr, ctx->level);
*unmapped += granule;
}

diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index 35855dadfb1b3..f7b9dfe3f3a5a 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -104,7 +104,6 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
* Instead, we invalidate Stage-2 for this IPA, and the
* whole of Stage-1. Weep...
*/
- ipa >>= 12;
__tlbi_level(ipas2e1is, ipa, level);

/*
@@ -136,7 +135,6 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
* Instead, we invalidate Stage-2 for this IPA, and the
* whole of Stage-1. Weep...
*/
- ipa >>= 12;
__tlbi_level(ipas2e1, ipa, level);

/*
--
2.43.0