[PATCH v6 41/76] x86/sev-es: Compile early handler code into kernel image

From: Joerg Roedel
Date: Mon Aug 24 2020 - 05:11:55 EST


From: Joerg Roedel <jroedel@xxxxxxx>

Set up sev-es.c and include the code from the pre-decompression stage so
that it is also built into the image of the running kernel. Temporarily
add __maybe_unused annotations to avoid build warnings until the
functions are actually used.
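For reference, the sharing works by #include'ing sev-es-shared.c at the
end of each consumer, so the helpers are compiled once per stage against
the stage-specific GHCB MSR accessors defined above the include, while
__maybe_unused (the compiler's unused attribute) keeps -Wunused-function
quiet for helpers this side does not call yet. A rough sketch of the
pattern (abbreviated, not the literal file contents):

  /* sev-es-shared.c: has no translation unit of its own */
  static void __maybe_unused sev_es_terminate(unsigned int reason)
  {
          /* uses sev_es_wr_ghcb_msr() from whichever file includes this */
  }

  /* arch/x86/kernel/sev-es.c (and, analogously, the pre-decompression */
  /* arch/x86/boot/compressed/sev-es.c)                                */
  static inline void sev_es_wr_ghcb_msr(u64 val)
  {
          /* stage-specific GHCB MSR write */
  }

  /* pull the shared helpers into this translation unit */
  #include "sev-es-shared.c"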

Signed-off-by: Joerg Roedel <jroedel@xxxxxxx>
Link: https://lore.kernel.org/r/20200724160336.5435-41-joro@xxxxxxxxxx
---
 arch/x86/kernel/Makefile        |   1 +
 arch/x86/kernel/sev-es-shared.c |  21 ++--
 arch/x86/kernel/sev-es.c        | 163 ++++++++++++++++++++++++++++++++
 3 files changed, 175 insertions(+), 10 deletions(-)
create mode 100644 arch/x86/kernel/sev-es.c

diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index e77261db2391..23bc0f830f18 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -145,6 +145,7 @@ obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o
obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o
obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o

+obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev-es.o
###
# 64 bit specific files
ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c
index a6b41910b8ab..18619279a46f 100644
--- a/arch/x86/kernel/sev-es-shared.c
+++ b/arch/x86/kernel/sev-es-shared.c
@@ -9,7 +9,7 @@
  * and is included directly into both code-bases.
  */
 
-static void sev_es_terminate(unsigned int reason)
+static void __maybe_unused sev_es_terminate(unsigned int reason)
 {
         u64 val = GHCB_SEV_TERMINATE;
 
@@ -27,7 +27,7 @@ static void sev_es_terminate(unsigned int reason)
                 asm volatile("hlt\n" : : : "memory");
 }
 
-static bool sev_es_negotiate_protocol(void)
+static bool __maybe_unused sev_es_negotiate_protocol(void)
 {
         u64 val;
 
@@ -46,7 +46,7 @@ static bool sev_es_negotiate_protocol(void)
         return true;
 }
 
-static void vc_ghcb_invalidate(struct ghcb *ghcb)
+static void __maybe_unused vc_ghcb_invalidate(struct ghcb *ghcb)
 {
         memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
 }
@@ -58,9 +58,9 @@ static bool vc_decoding_needed(unsigned long exit_code)
                  exit_code <= SVM_EXIT_LAST_EXCP);
 }
 
-static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
-                                      struct pt_regs *regs,
-                                      unsigned long exit_code)
+static enum es_result __maybe_unused vc_init_em_ctxt(struct es_em_ctxt *ctxt,
+                                                     struct pt_regs *regs,
+                                                     unsigned long exit_code)
 {
         enum es_result ret = ES_OK;
 
@@ -73,7 +73,7 @@ static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
         return ret;
 }
 
-static void vc_finish_insn(struct es_em_ctxt *ctxt)
+static void __maybe_unused vc_finish_insn(struct es_em_ctxt *ctxt)
 {
         ctxt->regs->ip += ctxt->insn.length;
 }
@@ -325,7 +325,8 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
         return ES_OK;
 }
 
-static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
+static enum es_result __maybe_unused
+vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
 {
         struct pt_regs *regs = ctxt->regs;
         u64 exit_info_1, exit_info_2;
@@ -433,8 +434,8 @@ static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
         return ret;
 }
 
-static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
-                                      struct es_em_ctxt *ctxt)
+static enum es_result __maybe_unused vc_handle_cpuid(struct ghcb *ghcb,
+                                                     struct es_em_ctxt *ctxt)
 {
         struct pt_regs *regs = ctxt->regs;
         u32 cr4 = native_read_cr4();
diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
new file mode 100644
index 000000000000..0b698b653c0b
--- /dev/null
+++ b/arch/x86/kernel/sev-es.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Memory Encryption Support
+ *
+ * Copyright (C) 2019 SUSE
+ *
+ * Author: Joerg Roedel <jroedel@xxxxxxx>
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+
+#include <asm/sev-es.h>
+#include <asm/insn-eval.h>
+#include <asm/fpu/internal.h>
+#include <asm/processor.h>
+#include <asm/trap_pf.h>
+#include <asm/trapnr.h>
+#include <asm/svm.h>
+
+static inline u64 sev_es_rd_ghcb_msr(void)
+{
+        return native_read_msr(MSR_AMD64_SEV_ES_GHCB);
+}
+
+static inline void sev_es_wr_ghcb_msr(u64 val)
+{
+        u32 low, high;
+
+        low  = (u32)(val);
+        high = (u32)(val >> 32);
+
+        native_write_msr(MSR_AMD64_SEV_ES_GHCB, low, high);
+}
+
+static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
+                                unsigned char *buffer)
+{
+        return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
+}
+
+static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
+{
+        char buffer[MAX_INSN_SIZE];
+        enum es_result ret;
+        int res;
+
+        res = vc_fetch_insn_kernel(ctxt, buffer);
+        if (unlikely(res == -EFAULT)) {
+                ctxt->fi.vector = X86_TRAP_PF;
+                ctxt->fi.error_code = 0;
+                ctxt->fi.cr2 = ctxt->regs->ip;
+                return ES_EXCEPTION;
+        }
+
+        insn_init(&ctxt->insn, buffer, MAX_INSN_SIZE - res, 1);
+        insn_get_length(&ctxt->insn);
+
+        ret = ctxt->insn.immediate.got ? ES_OK : ES_DECODE_FAILED;
+
+        return ret;
+}
+
+static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
+                                   char *dst, char *buf, size_t size)
+{
+        unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
+        char __user *target = (char __user *)dst;
+        u64 d8;
+        u32 d4;
+        u16 d2;
+        u8 d1;
+
+        switch (size) {
+        case 1:
+                memcpy(&d1, buf, 1);
+                if (put_user(d1, target))
+                        goto fault;
+                break;
+        case 2:
+                memcpy(&d2, buf, 2);
+                if (put_user(d2, target))
+                        goto fault;
+                break;
+        case 4:
+                memcpy(&d4, buf, 4);
+                if (put_user(d4, target))
+                        goto fault;
+                break;
+        case 8:
+                memcpy(&d8, buf, 8);
+                if (put_user(d8, target))
+                        goto fault;
+                break;
+        default:
+                WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
+                return ES_UNSUPPORTED;
+        }
+
+        return ES_OK;
+
+fault:
+        if (user_mode(ctxt->regs))
+                error_code |= X86_PF_USER;
+
+        ctxt->fi.vector = X86_TRAP_PF;
+        ctxt->fi.error_code = error_code;
+        ctxt->fi.cr2 = (unsigned long)dst;
+
+        return ES_EXCEPTION;
+}
+
+static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
+                                  char *src, char *buf, size_t size)
+{
+        unsigned long error_code = X86_PF_PROT;
+        char __user *s = (char __user *)src;
+        u64 d8;
+        u32 d4;
+        u16 d2;
+        u8 d1;
+
+        switch (size) {
+        case 1:
+                if (get_user(d1, s))
+                        goto fault;
+                memcpy(buf, &d1, 1);
+                break;
+        case 2:
+                if (get_user(d2, s))
+                        goto fault;
+                memcpy(buf, &d2, 2);
+                break;
+        case 4:
+                if (get_user(d4, s))
+                        goto fault;
+                memcpy(buf, &d4, 4);
+                break;
+        case 8:
+                if (get_user(d8, s))
+                        goto fault;
+                memcpy(buf, &d8, 8);
+                break;
+        default:
+                WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
+                return ES_UNSUPPORTED;
+        }
+
+        return ES_OK;
+
+fault:
+        if (user_mode(ctxt->regs))
+                error_code |= X86_PF_USER;
+
+        ctxt->fi.vector = X86_TRAP_PF;
+        ctxt->fi.error_code = error_code;
+        ctxt->fi.cr2 = (unsigned long)src;
+
+        return ES_EXCEPTION;
+}
+
+/* Include code shared with pre-decompression boot stage */
+#include "sev-es-shared.c"
--
2.28.0