[tip: x86/mm] x86/kvm: Refactor L1D flush page management
From: tip-bot2 for Balbir Singh
Date: Fri May 22 2020 - 05:33:25 EST
The following commit has been merged into the x86/mm branch of tip:
Commit-ID: b9b3bc1c30be1f056c1c0564bc7268820ea8bf70
Gitweb: https://git.kernel.org/tip/b9b3bc1c30be1f056c1c0564bc7268820ea8bf70
Author: Balbir Singh <sblbir@xxxxxxxxxx>
AuthorDate: Sun, 10 May 2020 11:47:58 +10:00
Committer: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
CommitterDate: Wed, 13 May 2020 18:12:18 +02:00
x86/kvm: Refactor L1D flush page management
Split out the allocation and free routines and move them into builtin code
so they can be reused for the upcoming paranoid L1D flush-on-context-switch
mitigation.
[ tglx: Add missing SPDX identifier and massage subject and changelog ]
Signed-off-by: Balbir Singh <sblbir@xxxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Reviewed-by: Kees Cook <keescook@xxxxxxxxxxxx>
Link: https://lkml.kernel.org/r/20200510014803.12190-2-sblbir@xxxxxxxxxx
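
For reference, a minimal caller-side sketch of how the split-out helpers are
meant to be used (it mirrors the vmx.c hunks below); the example_setup() and
example_teardown() names and the my_flush_pages pointer are illustrative only,
not part of the patch:

#include <linux/errno.h>
#include <asm/cacheflush.h>

static void *my_flush_pages;	/* illustrative caller-side pointer */

static int example_setup(void)
{
	/* Allocates 1 << L1D_CACHE_ORDER pages, pre-filled with distinct byte patterns */
	my_flush_pages = l1d_flush_alloc_pages();
	if (!my_flush_pages)
		return -ENOMEM;
	return 0;
}

static void example_teardown(void)
{
	if (my_flush_pages) {
		l1d_flush_cleanup_pages(my_flush_pages);
		my_flush_pages = NULL;
	}
}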
---
arch/x86/include/asm/cacheflush.h |  3 +++
arch/x86/kernel/Makefile          |  1 +
arch/x86/kernel/l1d_flush.c       | 39 +++++++++++++++++++++++++++++++++++++++
arch/x86/kvm/vmx/vmx.c            | 25 +++----------------------
4 files changed, 46 insertions(+), 22 deletions(-)
create mode 100644 arch/x86/kernel/l1d_flush.c
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 63feaf2..bac56fc 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -6,6 +6,9 @@
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>
+#define L1D_CACHE_ORDER 4
void clflush_cache_range(void *addr, unsigned int size);
+void *l1d_flush_alloc_pages(void);
+void l1d_flush_cleanup_pages(void *l1d_flush_pages);
#endif /* _ASM_X86_CACHEFLUSH_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index ba89cab..c04d218 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -156,3 +156,4 @@ ifeq ($(CONFIG_X86_64),y)
endif
obj-$(CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT) += ima_arch.o
+obj-y += l1d_flush.o
diff --git a/arch/x86/kernel/l1d_flush.c b/arch/x86/kernel/l1d_flush.c
new file mode 100644
index 0000000..4f298b7
--- /dev/null
+++ b/arch/x86/kernel/l1d_flush.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+
+void *l1d_flush_alloc_pages(void)
+{
+	struct page *page;
+	void *l1d_flush_pages = NULL;
+	int i;
+
+	/*
+	 * This allocation for l1d_flush_pages is not tied to a VM/task's
+	 * lifetime and so should not be charged to a memcg.
+	 */
+	page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+	if (!page)
+		return NULL;
+	l1d_flush_pages = page_address(page);
+
+	/*
+	 * Initialize each page with a different pattern in
+	 * order to protect against KSM in the nested
+	 * virtualization case.
+	 */
+	for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
+		memset(l1d_flush_pages + i * PAGE_SIZE, i + 1,
+		       PAGE_SIZE);
+	}
+	return l1d_flush_pages;
+}
+EXPORT_SYMBOL_GPL(l1d_flush_alloc_pages);
+
+void l1d_flush_cleanup_pages(void *l1d_flush_pages)
+{
+	free_pages((unsigned long)l1d_flush_pages, L1D_CACHE_ORDER);
+}
+EXPORT_SYMBOL_GPL(l1d_flush_cleanup_pages);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 8305097..225aa82 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -203,14 +203,10 @@ static const struct {
	[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
};
-#define L1D_CACHE_ORDER 4
static void *vmx_l1d_flush_pages;
static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
{
-	struct page *page;
-	unsigned int i;
-
	if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
		return 0;
@@ -253,24 +249,9 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
	if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
	    !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
-		/*
-		 * This allocation for vmx_l1d_flush_pages is not tied to a VM
-		 * lifetime and so should not be charged to a memcg.
-		 */
-		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
-		if (!page)
+		vmx_l1d_flush_pages = l1d_flush_alloc_pages();
+		if (!vmx_l1d_flush_pages)
			return -ENOMEM;
-		vmx_l1d_flush_pages = page_address(page);
-
-		/*
-		 * Initialize each page with a different pattern in
-		 * order to protect against KSM in the nested
-		 * virtualization case.
-		 */
-		for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
-			memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
-			       PAGE_SIZE);
-		}
	}
l1tf_vmx_mitigation = l1tf;
@@ -8026,7 +8007,7 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = {
static void vmx_cleanup_l1d_flush(void)
{
	if (vmx_l1d_flush_pages) {
-		free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
+		l1d_flush_cleanup_pages(vmx_l1d_flush_pages);
		vmx_l1d_flush_pages = NULL;
	}
	/* Restore state so sysfs ignores VMX */