[PATCH 4/5] x86/head/64: Check SEV encryption before switching to kernel page-table
From: Joerg Roedel
Date: Mon Oct 19 2020 - 11:11:40 EST
From: Joerg Roedel <jroedel@xxxxxxx>
When SEV is enabled, the kernel requests the C-bit position again from
the hypervisor to build its own page-table. Since the hypervisor is an
untrusted source, the C-bit position needs to be verified before the
kernel page-table is used.
Call the sev_verify_cbit() function before writing CR3.
Signed-off-by: Joerg Roedel <jroedel@xxxxxxx>
---
arch/x86/kernel/head_64.S | 14 +++++++++++++-
arch/x86/mm/mem_encrypt.c | 1 +
2 files changed, 14 insertions(+), 1 deletion(-)
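Note: sev_verify_cbit() itself lives in sev_verify_cbit.S, which is not part
of this patch, and is written in assembly because no stack can be trusted at
this point. As a rough C sketch of the idea only (read_cr3_raw(),
write_cr3_raw() and halt_forever() are hypothetical stand-ins, not the real
implementation), the check could look like this:

	#include <linux/types.h>

	/* Variables from arch/x86/mm/mem_encrypt.c, see the hunk below */
	extern u64 sme_me_mask, sev_status, sev_check_data;

	/* Hypothetical helpers, for illustration only */
	extern unsigned long read_cr3_raw(void);
	extern void write_cr3_raw(unsigned long cr3);
	extern void halt_forever(void);

	/*
	 * Sketch: store a value through the current, trusted mapping, switch
	 * to the new page-table and read it back. If the hypervisor lied
	 * about the C-bit position, the read goes through a wrongly encrypted
	 * mapping and the values will not match. The real check also has to
	 * make sure the read is not satisfied from a cache line established
	 * under the old mapping.
	 */
	unsigned long verify_cbit_sketch(unsigned long new_cr3)
	{
		u64 expected = 0xdeadbeefcafebabeULL; /* real code wants an unpredictable value */
		unsigned long old_cr3;

		if (!sme_me_mask || !sev_status)
			return new_cr3;		/* not an SEV guest */

		sev_check_data = expected;	/* written via the trusted mapping */

		old_cr3 = read_cr3_raw();
		write_cr3_raw(new_cr3);		/* read back via the untrusted mapping */

		if (sev_check_data != expected)
			halt_forever();		/* C-bit wrong - refuse to boot */

		write_cr3_raw(old_cr3);
		return new_cr3;			/* caller performs the final CR3 switch */
	}

The new page-table address is passed to sev_verify_cbit() in %rdi and is
expected to still be there after the call, which is why the hunk below loads
CR3 from %rdi instead of %rax.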
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 7eb2a1c87969..c6f4562359a5 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -161,7 +161,18 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
/* Setup early boot stage 4-/5-level pagetables. */
addq phys_base(%rip), %rax
- movq %rax, %cr3
+
+ /*
+ * For SEV guests: Verify that the C-bit is correct. A malicious
+ * hypervisor could lie about the C-bit position to perform a ROP
+ * attack on the guest by writing to the unencrypted stack and waiting
+ * for the next RET instruction.
+ */
+ movq %rax, %rdi
+ call sev_verify_cbit
+
+ /* Switch to new page-table */
+ movq %rdi, %cr3
/* Ensure I am executing from virtual addresses */
movq $1f, %rax
@@ -279,6 +290,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
SYM_CODE_END(secondary_startup_64)
#include "verify_cpu.S"
+#include "sev_verify_cbit.S"
#ifdef CONFIG_HOTPLUG_CPU
/*
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index ebb7edc8bc0a..bd9b62af2e3d 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -39,6 +39,7 @@
*/
u64 sme_me_mask __section(.data) = 0;
u64 sev_status __section(.data) = 0;
+u64 sev_check_data __section(.data) = 0;
EXPORT_SYMBOL(sme_me_mask);
DEFINE_STATIC_KEY_FALSE(sev_enable_key);
EXPORT_SYMBOL_GPL(sev_enable_key);
--
2.28.0