[PATCH RFC 03/19] x86/tlb: Expose some flush function declarations to modules

From: Brendan Jackman

Date: Wed Feb 25 2026 - 11:39:22 EST


In commit bfe3d8f6313d ("x86/tlb: Restrict access to tlbstate") some
low-level logic (the important detail here is flush_tlb_info) was hidden
from modules, along with functions associated with that data.

Later, the set of functions defined here changed and there are now a
bunch of flush_tlb_*() functions that do not depend on x86 internals
like flush_tlb_info.

This leads to some build fragility: KVM (which can be a module) cares
about TLB flushing and includes {linux->asm}/mmu_context.h, which
includes asm/tlb.h and asm/tlbflush.h. This x86 TLB code expects these
helpers to be defined (e.g. tlb_flush() calls flush_tlb_mm_range()).

Modules probably shouldn't call these helpers - luckily this is already
enforced by the lack of EXPORT_SYMBOL(). Therefore keep things simple
and just expose the declarations anyway to prevent build failures.

Signed-off-by: Brendan Jackman <jackmanb@xxxxxxxxxx>
---
arch/x86/include/asm/tlbflush.h | 43 +++++++++++++++++++++--------------------
1 file changed, 22 insertions(+), 21 deletions(-)

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 5a3cdc439e38d..ee49724403ef9 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -229,7 +229,6 @@ struct flush_tlb_info {
u8 trim_cpumask;
};

-void flush_tlb_local(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_multi(const struct cpumask *cpumask,
@@ -303,26 +302,6 @@ static inline void mm_clear_asid_transition(struct mm_struct *mm) { }
static inline bool mm_in_asid_transition(struct mm_struct *mm) { return false; }
#endif /* CONFIG_BROADCAST_TLB_FLUSH */

-#define flush_tlb_mm(mm) \
- flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)
-
-#define flush_tlb_range(vma, start, end) \
- flush_tlb_mm_range((vma)->vm_mm, start, end, \
- ((vma)->vm_flags & VM_HUGETLB) \
- ? huge_page_shift(hstate_vma(vma)) \
- : PAGE_SHIFT, true)
-
-extern void flush_tlb_all(void);
-extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
- unsigned long end, unsigned int stride_shift,
- bool freed_tables);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-
-static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
-{
- flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
-}
-
static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
bool should_defer = false;
@@ -487,4 +466,26 @@ static inline void __native_tlb_flush_global(unsigned long cr4)
native_write_cr4(cr4 ^ X86_CR4_PGE);
native_write_cr4(cr4);
}
+
+#define flush_tlb_mm(mm) \
+ flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)
+
+#define flush_tlb_range(vma, start, end) \
+ flush_tlb_mm_range((vma)->vm_mm, start, end, \
+ ((vma)->vm_flags & VM_HUGETLB) \
+ ? huge_page_shift(hstate_vma(vma)) \
+ : PAGE_SHIFT, true)
+
+void flush_tlb_local(void);
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end, unsigned int stride_shift,
+ bool freed_tables);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
+{
+ flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
+}
+
#endif /* _ASM_X86_TLBFLUSH_H */

--
2.51.2