[tip: x86/mm] x86/tlb: Restrict access to tlbstate
From: tip-bot2 for Thomas Gleixner
Date: Sun Apr 26 2020 - 14:43:07 EST
The following commit has been merged into the x86/mm branch of tip:
Commit-ID: bfe3d8f6313d1e10806062ba22c5f660dddecbcc
Gitweb: https://git.kernel.org/tip/bfe3d8f6313d1e10806062ba22c5f660dddecbcc
Author: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
AuthorDate: Tue, 21 Apr 2020 11:20:43 +02:00
Committer: Borislav Petkov <bp@xxxxxxx>
CommitterDate: Sun, 26 Apr 2020 18:52:33 +02:00
x86/tlb: Restrict access to tlbstate
Hide tlbstate, flush_tlb_info and related helpers when tlbflush.h is
included from a module. Modules have absolutely no business with these
internals.
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Borislav Petkov <bp@xxxxxxx>
Reviewed-by: Alexandre Chartre <alexandre.chartre@xxxxxxxxxx>
Acked-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Link: https://lkml.kernel.org/r/20200421092600.328438734@xxxxxxxxxxxxx
---
arch/x86/include/asm/tlbflush.h | 96 ++++++++++++++++----------------
arch/x86/mm/init.c | 1 +-
2 files changed, 49 insertions(+), 48 deletions(-)
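
Condensed, the layout the patch gives <asm/tlbflush.h> is roughly the following (a sketch with declarations abbreviated, not the literal file contents): the CR4 accessors and TLB_FLUSH_ALL stay visible to everyone, while tlbstate, flush_tlb_info and the flush helpers move under an #ifndef MODULE guard.

/* Rough sketch of the resulting <asm/tlbflush.h> layout, abbreviated */

/* Visible everywhere, including to modules: */
void __flush_tlb_all(void);
void cr4_update_irqsoff(unsigned long set, unsigned long clear);
unsigned long cr4_read_shadow(void);
/* ... plus the cr4_set_bits*() / cr4_clear_bits*() inline wrappers ... */
#define TLB_FLUSH_ALL	-1UL

#ifndef MODULE
/* Core kernel only: tlbstate, flush_tlb_info and the flush helpers */
struct flush_tlb_info;
void flush_tlb_local(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_others(const struct cpumask *cpumask,
		      const struct flush_tlb_info *info);
#endif /* !MODULE */

The guard works at preprocessing time: module code which includes the header simply never sees the internal declarations, so misuse is caught by the compiler rather than by review.
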
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index f973121..8c87a2e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -13,19 +13,46 @@
#include <asm/pti.h>
#include <asm/processor-flags.h>
-struct flush_tlb_info;
-
void __flush_tlb_all(void);
-void flush_tlb_local(void);
-void flush_tlb_one_user(unsigned long addr);
-void flush_tlb_one_kernel(unsigned long addr);
-void flush_tlb_others(const struct cpumask *cpumask,
- const struct flush_tlb_info *info);
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#endif
+#define TLB_FLUSH_ALL -1UL
+
+void cr4_update_irqsoff(unsigned long set, unsigned long clear);
+unsigned long cr4_read_shadow(void);
+
+/* Set in this cpu's CR4. */
+static inline void cr4_set_bits_irqsoff(unsigned long mask)
+{
+ cr4_update_irqsoff(mask, 0);
+}
+
+/* Clear in this cpu's CR4. */
+static inline void cr4_clear_bits_irqsoff(unsigned long mask)
+{
+ cr4_update_irqsoff(0, mask);
+}
+
+/* Set in this cpu's CR4. */
+static inline void cr4_set_bits(unsigned long mask)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ cr4_set_bits_irqsoff(mask);
+ local_irq_restore(flags);
+}
+
+/* Clear in this cpu's CR4. */
+static inline void cr4_clear_bits(unsigned long mask)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ cr4_clear_bits_irqsoff(mask);
+ local_irq_restore(flags);
+}
+
+#ifndef MODULE
/*
* 6 because 6 should be plenty and struct tlb_state will fit in two cache
* lines.
@@ -129,54 +156,17 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay nmi_uaccess_okay
-void cr4_update_irqsoff(unsigned long set, unsigned long clear);
-unsigned long cr4_read_shadow(void);
-
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}
-/* Set in this cpu's CR4. */
-static inline void cr4_set_bits_irqsoff(unsigned long mask)
-{
- cr4_update_irqsoff(mask, 0);
-}
-
-/* Clear in this cpu's CR4. */
-static inline void cr4_clear_bits_irqsoff(unsigned long mask)
-{
- cr4_update_irqsoff(0, mask);
-}
-
-/* Set in this cpu's CR4. */
-static inline void cr4_set_bits(unsigned long mask)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- cr4_set_bits_irqsoff(mask);
- local_irq_restore(flags);
-}
-
-/* Clear in this cpu's CR4. */
-static inline void cr4_clear_bits(unsigned long mask)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- cr4_clear_bits_irqsoff(mask);
- local_irq_restore(flags);
-}
-
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;
extern void initialize_tlbstate_and_flush(void);
-#define TLB_FLUSH_ALL -1UL
-
/*
* TLB flushing:
*
@@ -215,6 +205,16 @@ struct flush_tlb_info {
bool freed_tables;
};
+void flush_tlb_local(void);
+void flush_tlb_one_user(unsigned long addr);
+void flush_tlb_one_kernel(unsigned long addr);
+void flush_tlb_others(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info);
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#endif
+
#define flush_tlb_mm(mm) \
flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)
@@ -255,4 +255,6 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+#endif /* !MODULE */
+
#endif /* _ASM_X86_TLBFLUSH_H */
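
From a module's point of view the effect looks like this (a hypothetical out-of-tree module, purely for illustration; the file name and usage are made up):

/* demo_tlb.c -- hypothetical module, illustration only */
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/tlbflush.h>

static int __init demo_init(void)
{
	/*
	 * MODULE is defined when this file is built, so the #ifndef MODULE
	 * section of <asm/tlbflush.h> is not visible here.  Touching
	 * cpu_tlbstate or calling flush_tlb_others() now fails at compile
	 * time, e.g.:
	 *
	 *	this_cpu_read(cpu_tlbstate.loaded_mm);	// build error
	 *
	 * The CR4 accessors remain declared for modules; whether a module
	 * can actually link against cr4_update_irqsoff()/cr4_read_shadow()
	 * depends on their exports, which this patch does not touch.
	 */
	pr_info("demo_tlb loaded\n");
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
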
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index d37e816..248dc8f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -992,7 +992,6 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
.next_asid = 1,
.cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
};
-EXPORT_PER_CPU_SYMBOL(cpu_tlbstate);
void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
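
The init.c hunk is the link-time half of the restriction: EXPORT_PER_CPU_SYMBOL() is what made the cpu_tlbstate per-CPU variable resolvable from modules in the first place. Abridged (initializer shortened), the definition loses its export:

/* arch/x86/mm/init.c, before this patch (initializer abridged) */
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
	.next_asid = 1,
	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
};
EXPORT_PER_CPU_SYMBOL(cpu_tlbstate);	/* dropped by this patch */

With both halves in place, a module cannot reach the declaration through the header, and one that open-codes its own extern declaration is still rejected when the module is linked, because the symbol is no longer in the export table.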