[RFC PATCH 62/73] x86/pvm: Add early kernel event entry and dispatch code

From: Lai Jiangshan
Date: Mon Feb 26 2024 - 10:00:03 EST


From: Hou Wenlong <houwenlong.hwl@xxxxxxxxxxxx>

Since PVM doesn't support IDT-based event delivery, it needs to handle
early kernel events during boot. Currently, there are two stages before
the final IDT setup. Firstly, all exception handlers are set as
do_early_exception() in idt_setup_early_handlers(). Later, #DB, #BP, and
#PF are switched to their real handlers before the final IDT setup.
Follow the same two stages in the PVM early kernel event entry and
dispatch code: pvm_early_event() routes every event to
do_early_exception() until pvm_setup_early_traps() switches #DB, #BP,
and #PF over to their real handlers.

Signed-off-by: Hou Wenlong <houwenlong.hwl@xxxxxxxxxxxx>
Signed-off-by: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx>
---
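pvm_setup_early_traps() only flips the dispatcher into its second
stage; no caller is added by this patch. A minimal sketch of an assumed
call site, next to the native early trap setup in setup_arch() (the
placement and the hunk below are assumptions, not part of this series):

	/* arch/x86/kernel/setup.c: sketch of an assumed call site */
	void __init setup_arch(char **cmdline_p)
	{
		/* ... early boot setup ... */
		idt_setup_early_traps();  /* native: real #DB/#BP handlers in the IDT */
		pvm_setup_early_traps();  /* PVM guest: switch pvm_early_event() to stage two */
		/* ... */
	}
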
 arch/x86/include/asm/pvm_para.h |  5 +++++
 arch/x86/kernel/head_64.S       | 21 +++++++++++++++++++++
 arch/x86/kernel/pvm.c           | 33 +++++++++++++++++++++++++++++++++
 3 files changed, 59 insertions(+)

diff --git a/arch/x86/include/asm/pvm_para.h b/arch/x86/include/asm/pvm_para.h
index 9216e539fea8..bfb08f0ea293 100644
--- a/arch/x86/include/asm/pvm_para.h
+++ b/arch/x86/include/asm/pvm_para.h
@@ -13,6 +13,7 @@ typedef void (*idtentry_t)(struct pt_regs *regs);
 #include <uapi/asm/kvm_para.h>
 
 void __init pvm_early_setup(void);
+void __init pvm_setup_early_traps(void);
 void __init pvm_install_sysvec(unsigned int sysvec, idtentry_t handler);
 bool __init pvm_kernel_layout_relocate(void);
 
@@ -70,6 +71,10 @@ static inline void pvm_early_setup(void)
 {
 }
 
+static inline void pvm_setup_early_traps(void)
+{
+}
+
 static inline void pvm_install_sysvec(unsigned int sysvec, idtentry_t handler)
 {
 }
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 1d931bab4393..6ad3aedca7da 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -633,6 +633,27 @@ SYM_CODE_START_NOALIGN(vc_no_ghcb)
 SYM_CODE_END(vc_no_ghcb)
 #endif
 
+#ifdef CONFIG_PVM_GUEST
+	.align 256
+SYM_CODE_START_NOALIGN(pvm_early_kernel_event_entry)
+	UNWIND_HINT_ENTRY
+	ENDBR
+
+	incl	early_recursion_flag(%rip)
+
+	/* set %rcx, %r11 per PVM event handling specification */
+	movq	6*8(%rsp), %rcx
+	movq	7*8(%rsp), %r11
+
+	PUSH_AND_CLEAR_REGS
+	movq	%rsp, %rdi		/* %rdi -> pt_regs */
+	call	pvm_early_event
+
+	decl	early_recursion_flag(%rip)
+	jmp	pvm_restore_regs_and_return_to_kernel
+SYM_CODE_END(pvm_early_kernel_event_entry)
+#endif
+
 #define SYM_DATA_START_PAGE_ALIGNED(name)			\
 	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

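The 6*8 and 7*8 offsets above, together with the vector extraction in
pvm_early_event() below, imply the following event frame at entry. This
is a sketch inferred from this patch alone; the authoritative layout is
whatever the PVM event handling specification defines:

	/* Inferred PVM event frame, lowest address first (u64 from <linux/types.h>). */
	struct pvm_event_frame {
		u64 vector_errcode;	/* 0*8: vector in bits 63:32, error code in bits 31:0 */
		u64 rip;		/* 1*8 */
		u64 cs;			/* 2*8 */
		u64 rflags;		/* 3*8 */
		u64 rsp;		/* 4*8 */
		u64 ss;			/* 5*8 */
		u64 rcx;		/* 6*8: %rcx saved at event delivery, reloaded above */
		u64 r11;		/* 7*8: %r11 saved at event delivery, reloaded above */
	};

PUSH_AND_CLEAR_REGS then builds a full pt_regs below this frame, which
is why the vector/error-code word shows up as regs->orig_ax in the C
dispatcher.
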
diff --git a/arch/x86/kernel/pvm.c b/arch/x86/kernel/pvm.c
index 88b013185ecd..b3b4ff0bbc91 100644
--- a/arch/x86/kernel/pvm.c
+++ b/arch/x86/kernel/pvm.c
@@ -17,6 +17,7 @@
 #include <asm/cpu_entry_area.h>
 #include <asm/desc.h>
 #include <asm/pvm_para.h>
+#include <asm/setup.h>
 #include <asm/traps.h>
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct pvm_vcpu_struct, pvm_vcpu_struct);
@@ -24,6 +25,38 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct pvm_vcpu_struct, pvm_vcpu_struct);
 unsigned long pvm_range_start __initdata;
 unsigned long pvm_range_end __initdata;
 
+static bool early_traps_setup __initdata;
+
+void __init pvm_early_event(struct pt_regs *regs)
+{
+	int vector = regs->orig_ax >> 32;
+
+	if (!early_traps_setup) {
+		do_early_exception(regs, vector);
+		return;
+	}
+
+	switch (vector) {
+	case X86_TRAP_DB:
+		exc_debug(regs);
+		return;
+	case X86_TRAP_BP:
+		exc_int3(regs);
+		return;
+	case X86_TRAP_PF:
+		exc_page_fault(regs, regs->orig_ax);
+		return;
+	default:
+		do_early_exception(regs, vector);
+		return;
+	}
+}
+
+void __init pvm_setup_early_traps(void)
+{
+	early_traps_setup = true;
+}
+
 static noinstr void pvm_bad_event(struct pt_regs *regs, unsigned long vector,
				  unsigned long error_code)
 {
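
pvm_early_event() assumes the hypervisor packs the event vector into
bits 63:32 of the pushed error-code word, which PUSH_AND_CLEAR_REGS
turns into regs->orig_ax. A small self-contained illustration of that
assumed packing, using a #PF (vector 14) with error code 0x2:

	#include <stdint.h>
	#include <stdio.h>

	#define X86_TRAP_PF	14	/* same value as in asm/trapnr.h */

	int main(void)
	{
		/* assumed packing: vector in bits 63:32, error code in bits 31:0 */
		uint64_t orig_ax = ((uint64_t)X86_TRAP_PF << 32) | 0x2;

		printf("vector=%llu error_code=%#llx\n",
		       (unsigned long long)(orig_ax >> 32),
		       (unsigned long long)(orig_ax & 0xffffffff));
		return 0;
	}
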
--
2.19.1.6.gb485710b