[tip: x86/mm] x86/tlb: Move __flush_tlb_all() out of line
From: tip-bot2 for Thomas Gleixner
Date: Sun Apr 26 2020 - 14:42:56 EST
The following commit has been merged into the x86/mm branch of tip:
Commit-ID: 4b04e6c236744635eb4852bd9690172734fa0a1c
Gitweb: https://git.kernel.org/tip/4b04e6c236744635eb4852bd9690172734fa0a1c
Author: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
AuthorDate: Tue, 21 Apr 2020 11:20:37 +02:00
Committer: Borislav Petkov <bp@xxxxxxx>
CommitterDate: Sun, 26 Apr 2020 18:17:31 +02:00
x86/tlb: Move __flush_tlb_all() out of line
Reduce the number of required exports to one and make flush_tlb_global()
static to the TLB code.
flush_tlb_local() cannot be confined to the TLB code as the MTRR
handling requires a PGE-less flush.
Suggested-by: Christoph Hellwig <hch@xxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Borislav Petkov <bp@xxxxxxx>
Link: https://lkml.kernel.org/r/20200421092559.740388137@xxxxxxxxxxxxx
---
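The PGE-less flush mentioned above is why flush_tlb_local() keeps its header
declaration even though its export goes away: built-in code such as the MTRR
driver flushes with CR4.PGE cleared, where a plain CR3 write is already a
total flush. Below is a condensed, hypothetical sketch of that pattern, with
invented helper names; the real sequence in arch/x86/kernel/cpu/mtrr/generic.c
also toggles caching and is more involved. Modules cannot take this path; they
go through the one remaining export (see the sketch after the diff).

#include <asm/processor-flags.h>	/* X86_CR4_PGE */
#include <asm/special_insns.h>		/* __read_cr4(), __write_cr4() */
#include <asm/tlbflush.h>		/* flush_tlb_local() */

static unsigned long sketch_cr4;	/* hypothetical, for illustration */

/* Hypothetical helper modeled on the MTRR prepare path. */
static void sketch_prepare(void)
{
	/* The real code runs with interrupts disabled at this point. */
	sketch_cr4 = __read_cr4();
	if (sketch_cr4 & X86_CR4_PGE)
		__write_cr4(sketch_cr4 & ~X86_CR4_PGE);

	/*
	 * With CR4.PGE clear nothing in the TLB is global, so this plain
	 * CR3-based flush is total. flush_tlb_global() toggles CR4.PGE
	 * itself and must be avoided here; that is the PGE-less flush the
	 * commit message refers to.
	 */
	flush_tlb_local();
}

/* Hypothetical helper modeled on the MTRR post path. */
static void sketch_post(void)
{
	if (sketch_cr4 & X86_CR4_PGE)
		__write_cr4(sketch_cr4);	/* restore global pages */
}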
arch/x86/include/asm/tlbflush.h | 23 +----------------------
arch/x86/mm/tlb.c | 29 ++++++++++++++++++++++-------
2 files changed, 23 insertions(+), 29 deletions(-)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d064ae8..7401c6c 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -142,8 +142,8 @@ static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
 
 struct flush_tlb_info;
 
+void __flush_tlb_all(void);
 void flush_tlb_local(void);
-void flush_tlb_global(void);
 void flush_tlb_one_user(unsigned long addr);
 void flush_tlb_one_kernel(unsigned long addr);
 void flush_tlb_others(const struct cpumask *cpumask,
@@ -341,27 +341,6 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
 
 extern void initialize_tlbstate_and_flush(void);
 
-/*
- * flush everything
- */
-static inline void __flush_tlb_all(void)
-{
-	/*
-	 * This is to catch users with enabled preemption and the PGE feature
-	 * and don't trigger the warning in __native_flush_tlb().
-	 */
-	VM_WARN_ON_ONCE(preemptible());
-
-	if (boot_cpu_has(X86_FEATURE_PGE)) {
-		flush_tlb_global();
-	} else {
-		/*
-		 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
-		 */
-		flush_tlb_local();
-	}
-}
-
 #define TLB_FLUSH_ALL	-1UL
 
 /*
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 209799d..aabf8c7 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1018,12 +1018,6 @@ STATIC_NOPV void native_flush_tlb_global(void)
 	raw_local_irq_restore(flags);
 }
 
-void flush_tlb_global(void)
-{
-	__flush_tlb_global();
-}
-EXPORT_SYMBOL_GPL(flush_tlb_global);
-
 /*
  * Flush the entire current user mapping
 */
@@ -1046,7 +1040,28 @@ void flush_tlb_local(void)
 {
 	__flush_tlb_local();
 }
-EXPORT_SYMBOL_GPL(flush_tlb_local);
+
+/*
+ * Flush everything
+ */
+void __flush_tlb_all(void)
+{
+	/*
+	 * This is to catch users with enabled preemption and the PGE feature
+	 * and don't trigger the warning in __native_flush_tlb().
+	 */
+	VM_WARN_ON_ONCE(preemptible());
+
+	if (boot_cpu_has(X86_FEATURE_PGE)) {
+		__flush_tlb_global();
+	} else {
+		/*
+		 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
+		 */
+		flush_tlb_local();
+	}
+}
+EXPORT_SYMBOL_GPL(__flush_tlb_all);
 
 /*
  * arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.
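With the inline gone and the wrapper removed, modular code reaches the full
flush only through the single remaining export. A minimal, hypothetical module
sketch (module and function names invented) showing the contract that
__flush_tlb_all() now enforces on its callers:

#include <linux/module.h>
#include <linux/preempt.h>
#include <asm/tlbflush.h>	/* __flush_tlb_all() */

static int __init tlbdemo_init(void)
{
	/*
	 * __flush_tlb_all() contains VM_WARN_ON_ONCE(preemptible()), so
	 * the caller must not be preemptible around the flush.
	 */
	preempt_disable();
	__flush_tlb_all();	/* picks the global or local flush internally */
	preempt_enable();
	return 0;
}

static void __exit tlbdemo_exit(void)
{
}

module_init(tlbdemo_init);
module_exit(tlbdemo_exit);
MODULE_LICENSE("GPL");	/* EXPORT_SYMBOL_GPL: GPL-compatible modules only */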