[RFC PATCH v2 4/4] arch/x86: L1D flush, optimize the context switch
From: Balbir Singh
Date: Wed Mar 25 2020 - 03:11:58 EST
Use a static branch/jump label so that the check in l1d_flush() is a
no-op until at least one task has requested the L1D flush. The users
are not refcounted for now; refcounting could be added later if
needed (see the sketch below the diffstat).
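For reference, the pattern relied on here is roughly the following (a
minimal sketch of the jump label API with a hypothetical key, hot path
and slow path, not the code in this patch):

	#include <linux/jump_label.h>

	/* Hypothetical slow path, for illustration only. */
	extern void slow_path_work(void);

	/* The key starts disabled: the branch below begins life as a no-op. */
	static DEFINE_STATIC_KEY_FALSE(example_key);

	void hot_path(void)
	{
		/*
		 * While the key is disabled this is straight-line code
		 * with no conditional test; static_branch_enable()
		 * patches the site into a jump to the slow path.
		 */
		if (static_branch_unlikely(&example_key))
			slow_path_work();
	}

	void enable_example(void)
	{
		static_branch_enable(&example_key);	/* patch all branch sites */
	}

Enabling a key rewrites kernel text and is expensive, so it belongs on
a slow path, as it is here under l1d_flush_mutex.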
Signed-off-by: Balbir Singh <sblbir@xxxxxxxxxx>
---
arch/x86/include/asm/nospec-branch.h | 2 ++
arch/x86/mm/tlb.c | 12 ++++++++++++
2 files changed, 14 insertions(+)
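A note on the refcounting mentioned above: the jump label API already
tracks a reference count per key, so if enable_l1d_flush_for_task()
ever grows a disable counterpart, the pair could look roughly like
this (a sketch only, with made-up helper names, not part of this
patch):

	/* Enable the flush for the first user, disable for the last. */
	static void l1d_flush_get(void)
	{
		static_branch_inc(&switch_mm_l1d_flush);	/* 0 -> 1 enables */
	}

	static void l1d_flush_put(void)
	{
		static_branch_dec(&switch_mm_l1d_flush);	/* 1 -> 0 disables */
	}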
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 07e95dcb40ad..371e28cea1f4 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -310,6 +310,8 @@ DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+DECLARE_STATIC_KEY_FALSE(switch_mm_l1d_flush);
+
#include <asm/segment.h>
/**
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 22f96c5f74f0..bed2b6a5490d 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -155,6 +155,10 @@ EXPORT_SYMBOL_GPL(leave_mm);
static void *l1d_flush_pages;
static DEFINE_MUTEX(l1d_flush_mutex);
+/* Flush L1D on switch_mm() */
+DEFINE_STATIC_KEY_FALSE(switch_mm_l1d_flush);
+EXPORT_SYMBOL_GPL(switch_mm_l1d_flush);
+
int enable_l1d_flush_for_task(struct task_struct *tsk)
{
struct page *page;
@@ -170,6 +174,11 @@ int enable_l1d_flush_for_task(struct task_struct *tsk)
l1d_flush_pages = alloc_l1d_flush_pages();
if (!l1d_flush_pages)
return -ENOMEM;
+ /*
+ * We do not refcount users for now; the key stays enabled
+ * once the first task requests the flush.
+ */
+ static_branch_enable(&switch_mm_l1d_flush);
}
mutex_unlock(&l1d_flush_mutex);
}
@@ -195,6 +204,9 @@ static void l1d_flush(struct mm_struct *next, struct task_struct *tsk)
{
struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
+ if (!static_branch_unlikely(&switch_mm_l1d_flush))
+ return;
+
/*
* If we are not really switching mm's, we can just return
*/
--
2.17.1