[RFC PATCH V4 13/17] x86/sev: Add checks for pending #HV events in exit and IRQ-enable paths

From: Tianyu Lan
Date: Mon Apr 03 2023 - 13:45:59 EST


From: Tianyu Lan <tiala@xxxxxxxxxxxxx>

Add check_hv_pending() and check_hv_pending_irq_enable() to check
for #HV events that were queued while IRQs were disabled.
check_hv_pending() runs on the kernel exit paths, while
check_hv_pending_irq_enable() runs right after IRQs are re-enabled.

Signed-off-by: Tianyu Lan <tiala@xxxxxxxxxxxxx>
---
arch/x86/entry/entry_64.S | 18 ++++++++++++++++
arch/x86/include/asm/irqflags.h | 11 ++++++++++
arch/x86/kernel/sev.c | 38 +++++++++++++++++++++++++++++++++
3 files changed, 67 insertions(+)
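
A note for reviewers, not part of the commit message: below is a
minimal user-space model of the intended contract between the two
helpers. The hv_event_queued flag and the *_model() names are
hypothetical stand-ins for however the series actually tracks a
pending #HV event; the sketch only illustrates when each check runs
and why the drain happens with IRQs masked.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a pending, not-yet-handled #HV event. */
static bool hv_event_queued;

static void do_exc_hv_model(void)
{
	while (hv_event_queued) {
		hv_event_queued = false;
		puts("drained one queued #HV event");
	}
}

/*
 * Exit-path check (paranoid_exit/error_return): drain only when the
 * interrupted context had IF set; otherwise the event stays queued.
 */
static void check_hv_pending_model(bool interrupted_if_set)
{
	if (!interrupted_if_set)
		return;
	do_exc_hv_model();
}

/*
 * IRQ-enable check (native_irq_enable/native_safe_halt): drain with
 * "IRQs" masked again, mirroring the cli/sti pair in the patch.
 */
static void check_hv_pending_irq_enable_model(void)
{
	/* cli */
	do_exc_hv_model();
	/* sti */
}

int main(void)
{
	hv_event_queued = true;			/* arrived with IRQs off */
	check_hv_pending_model(false);		/* IF clear: deferred */
	check_hv_pending_irq_enable_model();	/* drained on irq enable */
	return 0;
}

The real check_hv_pending() makes the same test on regs->flags &
X86_EFLAGS_IF; the early return is what keeps #HV handling out of
sections that deliberately run with IRQs disabled.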

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index d877774c3141..efa56dfde19e 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1073,6 +1073,15 @@ SYM_CODE_END(paranoid_entry)
* R15 - old SPEC_CTRL
*/
SYM_CODE_START_LOCAL(paranoid_exit)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ /*
+ * If a #HV event was queued while interrupts were disabled,
+ * check whether it can be handled before the iret (which may
+ * re-enable interrupts).
+ */
+ mov %rsp, %rdi
+ call check_hv_pending
+#endif
UNWIND_HINT_REGS

/*
@@ -1197,6 +1206,15 @@ SYM_CODE_START(error_entry)
SYM_CODE_END(error_entry)

SYM_CODE_START_LOCAL(error_return)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ /*
+ * If a #HV event was queued while interrupts were disabled,
+ * check whether it can be handled before the iret (which may
+ * re-enable interrupts).
+ */
+ mov %rsp, %rdi
+ call check_hv_pending
+#endif
UNWIND_HINT_REGS
DEBUG_ENTRY_ASSERT_IRQS_OFF
testb $3, CS(%rsp)
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 8c5ae649d2df..8368e3fe2d36 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -11,6 +11,10 @@
/*
* Interrupt control:
*/
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+void check_hv_pending(struct pt_regs *regs);
+void check_hv_pending_irq_enable(void);
+#endif

/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
@@ -40,12 +44,19 @@ static __always_inline void native_irq_disable(void)
static __always_inline void native_irq_enable(void)
{
asm volatile("sti": : :"memory");
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ check_hv_pending_irq_enable();
+#endif
}

static __always_inline void native_safe_halt(void)
{
mds_idle_clear_cpu_buffers();
asm volatile("sti; hlt": : :"memory");
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ check_hv_pending_irq_enable();
+#endif
}

static __always_inline void native_halt(void)
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 2684a45b50a6..6445f5356c45 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -179,6 +179,44 @@ void noinstr __sev_es_ist_enter(struct pt_regs *regs)
this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
}

+static void do_exc_hv(struct pt_regs *regs)
+{
+ /* Handle #HV exception. */
+}
+
+void check_hv_pending(struct pt_regs *regs)
+{
+ if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ return;
+
+ if ((regs->flags & X86_EFLAGS_IF) == 0)
+ return;
+
+ do_exc_hv(regs);
+}
+
+void check_hv_pending_irq_enable(void)
+{
+ struct pt_regs regs;
+
+ if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+ return;
+
+ memset(&regs, 0, sizeof(struct pt_regs));
+ asm volatile("movl %%cs, %%eax;" : "=a" (regs.cs));
+ asm volatile("movl %%ss, %%eax;" : "=a" (regs.ss));
+ regs.orig_ax = 0xffffffff;
+ regs.flags = native_save_fl();
+
+ /*
+ * Disable IRQs again while handling the #HV events that may
+ * have been queued right after IRQs were re-enabled above.
+ */
+ asm volatile("cli" : : : "memory");
+ do_exc_hv(&regs);
+ asm volatile("sti" : : : "memory");
+}
+
void noinstr __sev_es_ist_exit(void)
{
unsigned long ist;
--
2.25.1
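
A final reviewer note on the sev.c hunk: do_exc_hv() is only a stub
here and is presumably filled in by a later patch in the series. The
cli/sti bracket around it in check_hv_pending_irq_enable() appears to
exist so that an event arriving mid-drain cannot trigger nested
handling, and the synthesized pt_regs (CS/SS read via inline asm,
orig_ax set to 0xffffffff, flags from native_save_fl()) stands in for
the trap frame that the exit-path callers receive naturally.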