[RFC PATCH 7/7] x86_64/tlb, mm: enable cpumask optimization for RSS cache
From: Kairui Song
Date: Thu Jul 28 2022 - 16:46:12 EST
From: Kairui Song <kasong@xxxxxxxxxxx>
Enable CONFIG_ARCH_PCP_RSS_USE_CPUMASK for x86_64 and do the RSS cache
switch in switch_mm_irqs_off. On x86_64 this is the unified routine for
switching an mm, so hooking in here ensures that any dead mm has its
cache invalidated in time and that the cpumask stays in sync with the
cache state.
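As a rough illustration of the idea for reviewers who have not read the
earlier patches: conceptually, the switch does a "flush old, track new"
step on every mm switch. The standalone model below is only a sketch;
the structures and names (struct mm, rss_cache, flush_rss_cache, etc.)
are made up for the example, and only switch_pcp_rss_cache_no_irq() in
the diff is the real hook.

	/* Simplified single-CPU userspace model of per-CPU RSS caching. */
	#include <stdio.h>

	#define NR_MM_COUNTERS 4

	struct mm {
		long rss_stat[NR_MM_COUNTERS];	/* shared counters */
	};

	/* Per-CPU cache: uncommitted RSS deltas for the mm last run here. */
	struct rss_cache {
		struct mm *mm;			/* mm the cached deltas belong to */
		long delta[NR_MM_COUNTERS];
	};

	static struct rss_cache cache;		/* one CPU's cache in this model */

	/* Fold the cached deltas back into the owning mm and reset the cache. */
	static void flush_rss_cache(void)
	{
		int i;

		if (!cache.mm)
			return;

		for (i = 0; i < NR_MM_COUNTERS; i++) {
			cache.mm->rss_stat[i] += cache.delta[i];
			cache.delta[i] = 0;
		}
	}

	/*
	 * Model of the hook added in switch_mm_irqs_off(): flush the old
	 * mm's cached counters, then track the mm we are switching to.
	 */
	static void switch_rss_cache(struct mm *next)
	{
		flush_rss_cache();
		cache.mm = next;
	}

	int main(void)
	{
		struct mm a = {0}, b = {0};

		cache.mm = &a;
		cache.delta[0] = 3;	/* e.g. pages faulted in on this CPU */

		switch_rss_cache(&b);	/* leaving 'a': commit its deltas */

		printf("mm a rss[0] = %ld\n", a.rss_stat[0]);	/* prints 3 */
		return 0;
	}

Since switch_mm_irqs_off is the single path all mm switches funnel
through on x86_64, doing this flush there, before mm_cpumask is
updated, is what keeps the cache state and the cpumask consistent.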
Signed-off-by: Kairui Song <kasong@xxxxxxxxxxx>
---
arch/x86/Kconfig | 1 +
arch/x86/mm/tlb.c | 5 +++++
2 files changed, 6 insertions(+)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 52a7f91527fe..15e2b29ba972 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -125,6 +125,7 @@ config X86
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANTS_THP_SWAP if X86_64
select ARCH_HAS_PARANOID_L1D_FLUSH
+ select ARCH_PCP_RSS_USE_CPUMASK if X86_64
select BUILDTIME_TABLE_SORT
select CLKEVT_I8253
select CLOCKSOURCE_VALIDATE_LAST_CYCLE
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index d400b6d9d246..614865f94d85 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -597,6 +597,11 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
*/
cond_mitigation(tsk);
+	/*
+	 * Flush the RSS cache before clearing the cpumask.
+	 */
+	switch_pcp_rss_cache_no_irq(next);
+
/*
* Stop remote flushes for the previous mm.
* Skip kernel threads; we never send init_mm TLB flushing IPIs,
--
2.35.2