[Part1 PATCH v4 02/17] x86/mm: Add Secure Encrypted Virtualization (SEV) support

From: Brijesh Singh
Date: Sat Sep 16 2017 - 08:39:23 EST


From: Tom Lendacky <thomas.lendacky@xxxxxxx>

Provide support for Secure Encrypted Virtualization (SEV). This initial
support defines the sev_enabled flag, which the kernel uses to determine
whether it is running with SEV active. It also adds the sme_active() and
sev_active() helpers to distinguish between SME and SEV, and the
mem_encrypt_active() helper for when that distinction is not needed.
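
As a minimal sketch of how the new helpers are intended to be used
(illustration only: the function below and its use of the existing
set_memory_encrypted()/set_memory_decrypted() helpers are assumptions
for this example, not part of this patch):

#include <asm/mem_encrypt.h>	/* sme_active(), sev_active() */
#include <asm/set_memory.h>	/* set_memory_{encrypted,decrypted}() */

/*
 * Illustration only, not part of this patch: prepare a range that the
 * APs will touch during early startup.  Under SEV the range must
 * remain encrypted, while under SME it must be decrypted (see the
 * trampoline discussion in the mem_encrypt.c comment below).
 */
static int __init example_adjust_range(unsigned long vaddr, int npages)
{
	if (sev_active())	/* SEV: range must stay encrypted */
		return set_memory_encrypted(vaddr, npages);
	if (sme_active())	/* SME: range must be decrypted */
		return set_memory_decrypted(vaddr, npages);

	return 0;		/* no memory encryption active */
}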

Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: linux-kernel@xxxxxxxxxxxxxxx
Cc: x86@xxxxxxxxxx
Signed-off-by: Tom Lendacky <thomas.lendacky@xxxxxxx>
Signed-off-by: Brijesh Singh <brijesh.singh@xxxxxxx>
---
 arch/x86/include/asm/mem_encrypt.h |  6 ++++++
 arch/x86/mm/mem_encrypt.c          | 26 ++++++++++++++++++++++++++
 include/linux/mem_encrypt.h        | 12 ++++++++----
 3 files changed, 40 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 6a77c63540f7..2b024741bce9 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -47,6 +47,9 @@ void __init mem_encrypt_init(void);

void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);

+bool sme_active(void);
+bool sev_active(void);
+
#else /* !CONFIG_AMD_MEM_ENCRYPT */

#define sme_me_mask 0ULL
@@ -64,6 +67,9 @@ static inline void __init sme_early_init(void) { }
static inline void __init sme_encrypt_kernel(void) { }
static inline void __init sme_enable(struct boot_params *bp) { }

+static inline bool sme_active(void) { return false; }
+static inline bool sev_active(void) { return false; }
+
#endif /* CONFIG_AMD_MEM_ENCRYPT */

/*
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 3fcc8e01683b..4e6dcabe52fc 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -40,6 +40,8 @@ static char sme_cmdline_off[] __initdata = "off";
u64 sme_me_mask __section(.data) = 0;
EXPORT_SYMBOL_GPL(sme_me_mask);

+unsigned int sev_enabled __section(.data) = 0;
+
/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);

@@ -190,6 +192,30 @@ void __init sme_early_init(void)
		protection_map[i] = pgprot_encrypted(protection_map[i]);
}

+/*
+ * SME and SEV are very similar but they are not the same, so there are
+ * times that the kernel will need to distinguish between SME and SEV. The
+ * sme_active() and sev_active() functions are used for this. When a
+ * distinction isn't needed, the mem_encrypt_active() function can be used.
+ *
+ * The trampoline code is a good example of this requirement. Before
+ * paging is activated, SME will access all memory as decrypted, but SEV
+ * will access all memory as encrypted. So, when APs are being brought
+ * up under SME the trampoline area cannot be encrypted, whereas under SEV
+ * the trampoline area must be encrypted.
+ */
+bool sme_active(void)
+{
+	return sme_me_mask && !sev_enabled;
+}
+EXPORT_SYMBOL_GPL(sme_active);
+
+bool sev_active(void)
+{
+	return sme_me_mask && sev_enabled;
+}
+EXPORT_SYMBOL_GPL(sev_active);
+
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 265a9cd21cb4..b55ba30a60a0 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
@@ -22,17 +22,21 @@
#else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */

#define sme_me_mask 0ULL
+#define sev_enabled 0
+
+static inline bool sme_active(void) { return false; }
+static inline bool sev_active(void) { return false; }

#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */

-static inline bool sme_active(void)
+static inline unsigned long sme_get_me_mask(void)
{
-	return !!sme_me_mask;
+	return sme_me_mask;
}

-static inline u64 sme_get_me_mask(void)
+static inline bool mem_encrypt_active(void)
{
-	return sme_me_mask;
+	return !!sme_me_mask;
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
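
For completeness, a minimal sketch of the mem_encrypt_active() case
mentioned in the mem_encrypt.c comment above, for callers that do not
need the SME/SEV distinction (illustration only; the function is
invented for this example and is not part of the series):

#include <linux/mem_encrypt.h>	/* mem_encrypt_active() */

/*
 * Illustration only, not part of this series: generic setup that does
 * not care whether SME or SEV provides the encryption, only that one
 * of them is active.
 */
static void __init example_common_setup(void)
{
	if (!mem_encrypt_active())
		return;

	/* common SME/SEV work, such as preparing bounce buffers for
	 * device DMA, would go here */
}
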
--
2.9.5