Re: [PATCH v14 11/13] x86/mm: do targeted broadcast flushing from tlbbatch code
From: Dave Hansen
Date: Mon Mar 03 2025 - 16:47:46 EST
On 3/3/25 03:46, Borislav Petkov wrote:
> On Tue, Feb 25, 2025 at 10:00:46PM -0500, Rik van Riel wrote:
>> +static inline bool cpu_need_tlbsync(void)
>> +{
>> + return this_cpu_read(cpu_tlbstate.need_tlbsync);
>> +}
>> +
>> +static inline void cpu_write_tlbsync(bool state)
>
> That thing feels better like "cpu_set_tlbsync" in the code...
>
>> +{
>> + this_cpu_write(cpu_tlbstate.need_tlbsync, state);
>> +}
>> #else
>> static inline u16 mm_global_asid(struct mm_struct *mm)
>> {
>
> ...
>
>> +static inline void tlbsync(void)
>> +{
>> + if (!cpu_need_tlbsync())
>> + return;
>> + __tlbsync();
>> + cpu_write_tlbsync(false);
>> +}
>
> Easier to parse visually:
>
> static inline void tlbsync(void)
> {
> if (cpu_need_tlbsync()) {
> __tlbsync();
> cpu_write_tlbsync(false);
> }
> }
>
> Final:
>
> From: Rik van Riel <riel@xxxxxxxxxxx>
> Date: Tue, 25 Feb 2025 22:00:46 -0500
> Subject: [PATCH] x86/mm: Do targeted broadcast flushing from tlbbatch code
>
> Instead of doing a system-wide TLB flush from arch_tlbbatch_flush(), queue up
> asynchronous, targeted flushes from arch_tlbbatch_add_pending().
>
> This also allows us to avoid adding the CPUs of processes using broadcast
> flushing to the batch->cpumask, and will hopefully further reduce TLB flushing
> from the reclaim and compaction paths.
>
> [ bp:
> - Massage
> - :%s/\<static_cpu_has\>/cpu_feature_enabled/cgi ]
>
> Signed-off-by: Rik van Riel <riel@xxxxxxxxxxx>
> Signed-off-by: Borislav Petkov (AMD) <bp@xxxxxxxxx>
> Link: https://lore.kernel.org/r/20250226030129.530345-12-riel@xxxxxxxxxxx
> ---
> arch/x86/include/asm/tlb.h | 12 ++---
> arch/x86/include/asm/tlbflush.h | 27 +++++++----
> arch/x86/mm/tlb.c | 79 +++++++++++++++++++++++++++++++--
> 3 files changed, 100 insertions(+), 18 deletions(-)
>
> diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
> index 04f2c6f4cee3..b5c2005725cf 100644
> --- a/arch/x86/include/asm/tlb.h
> +++ b/arch/x86/include/asm/tlb.h
> @@ -102,16 +102,16 @@ static inline void __tlbsync(void) { }
> #define INVLPGB_FINAL_ONLY BIT(4)
> #define INVLPGB_INCLUDE_NESTED BIT(5)
>
> -static inline void invlpgb_flush_user_nr_nosync(unsigned long pcid,
> - unsigned long addr,
> - u16 nr,
> - bool pmd_stride)
> +static inline void __invlpgb_flush_user_nr_nosync(unsigned long pcid,
> + unsigned long addr,
> + u16 nr,
> + bool pmd_stride)
> {
> __invlpgb(0, pcid, addr, nr, pmd_stride, INVLPGB_PCID | INVLPGB_VA);
> }
>
> /* Flush all mappings for a given PCID, not including globals. */
> -static inline void invlpgb_flush_single_pcid_nosync(unsigned long pcid)
> +static inline void __invlpgb_flush_single_pcid_nosync(unsigned long pcid)
> {
> __invlpgb(0, pcid, 0, 1, 0, INVLPGB_PCID);
> }
> @@ -131,7 +131,7 @@ static inline void invlpgb_flush_all(void)
> }
>
> /* Flush addr, including globals, for all PCIDs. */
> -static inline void invlpgb_flush_addr_nosync(unsigned long addr, u16 nr)
> +static inline void __invlpgb_flush_addr_nosync(unsigned long addr, u16 nr)
> {
> __invlpgb(0, 0, addr, nr, 0, INVLPGB_INCLUDE_GLOBAL);
> }
> diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
> index 8c21030269ff..cbdb86d58301 100644
> --- a/arch/x86/include/asm/tlbflush.h
> +++ b/arch/x86/include/asm/tlbflush.h
> @@ -105,6 +105,9 @@ struct tlb_state {
> * need to be invalidated.
> */
> bool invalidate_other;
> +#ifdef CONFIG_BROADCAST_TLB_FLUSH
> + bool need_tlbsync;
> +#endif
>
> #ifdef CONFIG_ADDRESS_MASKING
> /*
> @@ -292,11 +295,23 @@ static inline bool mm_in_asid_transition(struct mm_struct *mm)
>
> return mm && READ_ONCE(mm->context.asid_transition);
> }
> +
> +static inline bool cpu_need_tlbsync(void)
> +{
> + return this_cpu_read(cpu_tlbstate.need_tlbsync);
> +}
> +
> +static inline void cpu_set_tlbsync(bool state)
> +{
> + this_cpu_write(cpu_tlbstate.need_tlbsync, state);
> +}
> #else
> static inline u16 mm_global_asid(struct mm_struct *mm) { return 0; }
> static inline void mm_init_global_asid(struct mm_struct *mm) { }
> static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid) { }
> static inline bool mm_in_asid_transition(struct mm_struct *mm) { return false; }
> +static inline bool cpu_need_tlbsync(void) { return false; }
> +static inline void cpu_set_tlbsync(bool state) { }
> #endif /* CONFIG_BROADCAST_TLB_FLUSH */
>
> #ifdef CONFIG_PARAVIRT
> @@ -346,21 +361,15 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
> return atomic64_inc_return(&mm->context.tlb_gen);
> }
>
> -static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
> - struct mm_struct *mm,
> - unsigned long uaddr)
> -{
> - inc_mm_tlb_gen(mm);
> - cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
> - mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
> -}
> -
> static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
> {
> flush_tlb_mm(mm);
> }
>
> extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
> +extern void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
> + struct mm_struct *mm,
> + unsigned long uaddr);
>
> static inline bool pte_flags_need_flush(unsigned long oldflags,
> unsigned long newflags,
> diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
> index 0efd99053c09..83ba6876adbf 100644
> --- a/arch/x86/mm/tlb.c
> +++ b/arch/x86/mm/tlb.c
> @@ -492,6 +492,37 @@ static void finish_asid_transition(struct flush_tlb_info *info)
> mm_clear_asid_transition(mm);
> }
>
> +static inline void tlbsync(void)
> +{
> + if (cpu_need_tlbsync()) {
> + __tlbsync();
> + cpu_set_tlbsync(false);
> + }
> +}
> +
> +static inline void invlpgb_flush_user_nr_nosync(unsigned long pcid,
> + unsigned long addr,
> + u16 nr, bool pmd_stride)
> +{
> + __invlpgb_flush_user_nr_nosync(pcid, addr, nr, pmd_stride);
> + if (!cpu_need_tlbsync())
> + cpu_set_tlbsync(true);
> +}
> +
> +static inline void invlpgb_flush_single_pcid_nosync(unsigned long pcid)
> +{
> + __invlpgb_flush_single_pcid_nosync(pcid);
> + if (!cpu_need_tlbsync())
> + cpu_set_tlbsync(true);
> +}
> +
> +static inline void invlpgb_flush_addr_nosync(unsigned long addr, u16 nr)
> +{
> + __invlpgb_flush_addr_nosync(addr, nr);
> + if (!cpu_need_tlbsync())
> + cpu_set_tlbsync(true);
> +}
One thought on these:
Instead of having three functions:
1. A raw __invlpgb_*_nosync()
2. A wrapper invlpgb_*_nosync() that sets need_tlbsync via cpu_set_tlbsync()
3. A wrapper invlpgb_*()
Could we get away with just two? For instance, what if we had *ALL*
__invlpgb()s do cpu_set_tlbsync()? Then we'd universally call tlbsync().
static inline void invlpgb_flush_all_nonglobals(void)
{
	guard(preempt)();
	__invlpgb(0, 0, 0, 1, NO_STRIDE, INVLPGB_MODE_ALL_NONGLOBALS);
	tlbsync();
}
Then we wouldn't need any of those three new wrappers. The only downside
is that we'd end up with paths that logically do:
	__invlpgb()
	cpu_set_tlbsync(true);
	if (cpu_need_tlbsync()) {	// always true
		__tlbsync();
		cpu_set_tlbsync(false);
	}
In other words, a possibly superfluous set/check/clear of the
"need_tlbsync" state. But I'd expect that to be a pittance compared to
the actual cost of INVLPGB/TLBSYNC.
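To make it concrete, a sketch (the __invlpgb() signature and the
INVLPGB_PCID call are from this series; the body is elided here, and
invlpgb_flush_single_pcid() is a made-up name for the synchronous
wrapper):

static inline void __invlpgb(unsigned long asid, unsigned long pcid,
			     unsigned long addr, u16 nr_pages,
			     bool pmd_stride, u8 flags)
{
	/* ... emit the INVLPGB instruction exactly as today ... */

	/* Every raw broadcast flush leaves a TLBSYNC owed by this CPU: */
	cpu_set_tlbsync(true);
}

static inline void invlpgb_flush_single_pcid(unsigned long pcid)
{
	guard(preempt)();
	__invlpgb(0, pcid, 0, 1, 0, INVLPGB_PCID);
	tlbsync();
}

The tlbbatch code would then call the raw __invlpgb_*_nosync() helpers
directly and lean on the later tlbsync() in arch_tlbbatch_flush(), same
as in this patch.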
> static void broadcast_tlb_flush(struct flush_tlb_info *info)
> {
> bool pmd = info->stride_shift == PMD_SHIFT;
> @@ -790,6 +821,8 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
> if (IS_ENABLED(CONFIG_PROVE_LOCKING))
> WARN_ON_ONCE(!irqs_disabled());
>
> + tlbsync();
This one is in dire need of comments.
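If the reasoning is what I suspect (TLBSYNC only waits for INVLPGBs
issued by the *current* CPU, so the outgoing task's pending flushes have
to be drained before it can migrate), then something like this would do,
assuming that really is the intent:

	/*
	 * TLBSYNC waits only for INVLPGBs issued from this CPU. Sync
	 * any broadcast flushes the outgoing task still has pending:
	 * once it migrates, a tlbsync() on its new CPU would not cover
	 * the flushes it issued here.
	 */
	tlbsync();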
> /*
> * Verify that CR3 is what we think it is. This will catch
> * hypothetical buggy code that directly switches to swapper_pg_dir
> @@ -966,6 +999,8 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
> */
> void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
> {
> + tlbsync();
Ditto, *especially* if this hits the init_mm state. There really
shouldn't be any deferred flushes for the init_mm.
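If that expectation holds, I'd rather see it asserted than papered over
with a silent sync. A sketch (cpu_need_tlbsync() is from this patch; the
VM_WARN_ON_ONCE() and the reordering are mine):

void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm) {
		/* There should never be deferred flushes for the init_mm: */
		VM_WARN_ON_ONCE(cpu_need_tlbsync());
		return;
	}

	tlbsync();
	...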
> if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
> return;
>
> @@ -1633,9 +1668,7 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
> * a local TLB flush is needed. Optimize this use-case by calling
> * flush_tlb_func_local() directly in this case.
> */
> - if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
> - invlpgb_flush_all_nonglobals();
> - } else if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) {
> + if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) {
> flush_tlb_multi(&batch->cpumask, info);
> } else if (cpumask_test_cpu(cpu, &batch->cpumask)) {
> lockdep_assert_irqs_enabled();
> @@ -1644,12 +1677,52 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
> local_irq_enable();
> }
>
> + /*
> + * If (asynchronous) INVLPGB flushes were issued, wait for them here.
> + * The cpumask above contains only CPUs that were running tasks
> + * not using broadcast TLB flushing.
> + */
> + tlbsync();
I have a suggested comment in the attached fixups. The comment above makes
it sound like tasks either use INVLPGB *or* are in ->cpumask. Transitioning
mms can be both, I think.
> cpumask_clear(&batch->cpumask);
>
> put_flush_tlb_info();
> put_cpu();
> }
>
> +void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
> + struct mm_struct *mm,
> + unsigned long uaddr)
> +{
> + u16 asid = mm_global_asid(mm);
> +
> + if (asid) {
> + invlpgb_flush_user_nr_nosync(kern_pcid(asid), uaddr, 1, false);
> + /* Do any CPUs supporting INVLPGB need PTI? */
> + if (cpu_feature_enabled(X86_FEATURE_PTI))
> + invlpgb_flush_user_nr_nosync(user_pcid(asid), uaddr, 1, false);
> +
> + /*
> + * Some CPUs might still be using a local ASID for this
> + * process, and require IPIs, while others are using the
> + * global ASID.
> + *
> + * In this corner case, both broadcast TLB invalidation
> + * and IPIs need to be sent. The IPIs will help
> + * stragglers transition to the broadcast ASID.
> + */
> + if (mm_in_asid_transition(mm))
> + asid = 0;
> + }
> +
> + if (!asid) {
> + inc_mm_tlb_gen(mm);
> + cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
> + }
> +
> + mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
> +}
I stuck some better comments in the patch. I think this should both
mention that a later tlbsync() is required and *also* match the two
cases better.
Also, the "if (asid)" isn't super nice naming. Let's please call it
"global_asid" because it's either a global ASID or 0.
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 8e596de963a8f..8e1ac8be123b2 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -821,6 +821,9 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
if (IS_ENABLED(CONFIG_PROVE_LOCKING))
WARN_ON_ONCE(!irqs_disabled());
+ // FIXME
+ // This is totally unexplained and needs justification and
+ // commenting
tlbsync();
/*
@@ -1678,9 +1681,10 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
}
/*
- * If (asynchronous) INVLPGB flushes were issued, wait for them here.
- * The cpumask above contains only CPUs that were running tasks
- * not using broadcast TLB flushing.
+ * Wait for outstanding INVLPGB flushes. batch->cpumask will
+ * be empty when the batch was handled completely by INVLPGB.
+ * Note that mm_in_asid_transition() mm's may use INVLPGB and
+ * the flush_tlb_multi() IPIs at the same time.
*/
tlbsync();
@@ -1693,9 +1697,14 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
struct mm_struct *mm, unsigned long uaddr)
{
- u16 asid = mm_global_asid(mm);
+ u16 global_asid = mm_global_asid(mm);
- if (asid) {
+ if (global_asid) {
+ /*
+ * Global ASIDs can be flushed with INVLPGB. Flush
+ * now instead of batching them for later. A later
+ * tlbsync() is required to ensure these completed.
+ */
-		invlpgb_flush_user_nr_nosync(kern_pcid(asid), uaddr, 1, false);
+		invlpgb_flush_user_nr_nosync(kern_pcid(global_asid), uaddr, 1, false);
 		/* Do any CPUs supporting INVLPGB need PTI? */
 		if (cpu_feature_enabled(X86_FEATURE_PTI))
-			invlpgb_flush_user_nr_nosync(user_pcid(asid), uaddr, 1, false);
+			invlpgb_flush_user_nr_nosync(user_pcid(global_asid), uaddr, 1, false);
@@ -1714,7 +1723,11 @@ void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
-			asid = 0;
+			global_asid = 0;
}
- if (!asid) {
+ if (!global_asid) {
+ /*
+ * Mark the mm and the CPU so that
+ * the TLB gets flushed later.
+ */
inc_mm_tlb_gen(mm);
cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}