[PATCH] x86/mm/tlb: Do partial TLB flush when possible
From: Zhenzhong Duan
Date: Thu May 30 2019 - 04:00:10 EST
This is a small optimization for stale TLB flush requests: if a newer
TLB flush is pending, let it choose whether to do a partial or a full
flush; otherwise, the stale flush takes over and does a full flush.
Add unlikely() to the info->freed_tables check, as freeing page tables
is relatively rare.
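To make the intent concrete, the decision the new check makes can be
sketched in plain C. This snippet is illustrative only and not part of
the patch: the helper name skip_stale_flush() and the standalone framing
are made up here, while the three generation counters mirror
f->new_tlb_gen, the CPU's local_tlb_gen and the mm-wide mm_tlb_gen used
in flush_tlb_func_common().

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /*
   * Illustrative model of the new check: a request is "stale" when the
   * local CPU has already flushed up to (or past) the request's
   * generation.  If exactly one newer generation is outstanding, the
   * stale IPI may return early and let the newer request pick a partial
   * or full flush; otherwise it falls through and does a full flush.
   */
  static bool skip_stale_flush(uint64_t new_tlb_gen,
                               uint64_t local_tlb_gen,
                               uint64_t mm_tlb_gen)
  {
          return new_tlb_gen <= local_tlb_gen &&
                 local_tlb_gen + 1 == mm_tlb_gen;
  }

  int main(void)
  {
          /* Stale request, exactly one newer flush pending: skip (1). */
          printf("%d\n", skip_stale_flush(2, 2, 3));
          /* Stale request, several generations outstanding: do not skip (0). */
          printf("%d\n", skip_stale_flush(2, 2, 5));
          return 0;
  }

When the check fails because several generations are outstanding, the
request falls through to the existing code and ends up doing a full
flush, catching the CPU up to mm_tlb_gen in one go.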
Signed-off-by: Zhenzhong Duan <zhenzhong.duan@xxxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Srinivas Eeda <srinivas.eeda@xxxxxxxxxx>
Cc: x86@xxxxxxxxxx
---
arch/x86/mm/tlb.c | 20 +++++++++++++++-----
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 91f6db9..63a8125 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -569,6 +569,17 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 		return;
 	}
+	if (unlikely(f->new_tlb_gen <= local_tlb_gen &&
+		     local_tlb_gen + 1 == mm_tlb_gen)) {
+		/*
+		 * For a stale TLB flush request, if a newer TLB flush is on
+		 * the way, leave the work to the new IPI, which knows whether
+		 * a partial or a full flush is needed; otherwise we do the
+		 * full flush.
+		 */
+		trace_tlb_flush(reason, 0);
+		return;
+	}
 	WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
 	WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);
@@ -577,7 +588,8 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 	 * This does not strictly imply that we need to flush (it's
 	 * possible that f->new_tlb_gen <= local_tlb_gen), but we're
 	 * going to need to flush in the very near future, so we might
-	 * as well get it over with.
+	 * as well get it over with, since at this point we know there
+	 * is more than one outstanding TLB flush request.
 	 *
 	 * The only question is whether to do a full or partial flush.
 	 *
@@ -609,9 +621,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
 	 * local_tlb_gen all the way to mm_tlb_gen and we can probably
 	 * avoid another flush in the very near future.
 	 */
-	if (f->end != TLB_FLUSH_ALL &&
-	    f->new_tlb_gen == local_tlb_gen + 1 &&
-	    f->new_tlb_gen == mm_tlb_gen) {
+	if (f->end != TLB_FLUSH_ALL && local_tlb_gen + 1 == mm_tlb_gen) {
 		/* Partial flush */
 		unsigned long nr_invalidate = (f->end - f->start) >> f->stride_shift;
 		unsigned long addr = f->start;
@@ -703,7 +713,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 	 * up on the new contents of what used to be page tables, while
 	 * doing a speculative memory access.
 	 */
-	if (info->freed_tables)
+	if (unlikely(info->freed_tables))
 		smp_call_function_many(cpumask, flush_tlb_func_remote,
 				       (void *)info, 1);
 	else
--
1.8.3.1