[RFC PATCH 63/73] x86/pvm: Add hypercall support

From: Lai Jiangshan
Date: Mon Feb 26 2024 - 10:00:23 EST


From: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx>

A PVM guest uses the syscall instruction as its hypercall instruction
and follows the KVM hypercall calling convention.
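
For reference, the wrappers added below follow the usual KVM register
usage: the hypercall number goes in RAX, up to three arguments in
RBX/RCX/RDX, and the result comes back in RAX. A minimal, purely
illustrative caller (the hypercall number and helper name here are made
up and not defined by this series) would look like:

	/*
	 * Illustrative sketch only: issue a two-argument hypercall
	 * through the pvm_hypercall2() wrapper added in this patch.
	 * EXAMPLE_HC_NR is a placeholder number, not part of the series.
	 */
	#define EXAMPLE_HC_NR	100

	static long example_hypercall(unsigned long gpa, unsigned long len)
	{
		/* nr in RAX, gpa in RBX, len in RCX; result returned in RAX */
		return pvm_hypercall2(EXAMPLE_HC_NR, gpa, len);
	}

Because the syscall instruction clobbers RCX and R11, the pvm_hypercall
stub preserves R10/R11 on the stack and carries the RCX argument across
the syscall in R10 before restoring the registers.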

Signed-off-by: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx>
Signed-off-by: Hou Wenlong <houwenlong.hwl@xxxxxxxxxxxx>
---
arch/x86/entry/entry_64_pvm.S | 15 +++++++++++
arch/x86/include/asm/pvm_para.h | 1 +
arch/x86/kernel/pvm.c | 46 +++++++++++++++++++++++++++++++++
3 files changed, 62 insertions(+)

diff --git a/arch/x86/entry/entry_64_pvm.S b/arch/x86/entry/entry_64_pvm.S
index 256baf86a9f3..abb57e251e73 100644
--- a/arch/x86/entry/entry_64_pvm.S
+++ b/arch/x86/entry/entry_64_pvm.S
@@ -52,6 +52,21 @@ SYM_CODE_START(entry_SYSCALL_64_pvm)
jmp entry_SYSCALL_64_after_hwframe
SYM_CODE_END(entry_SYSCALL_64_pvm)

+.pushsection .noinstr.text, "ax"
+SYM_FUNC_START(pvm_hypercall)
+	push	%r11
+	push	%r10
+	movq	%rcx, %r10
+	UNWIND_HINT_SAVE
+	syscall
+	UNWIND_HINT_RESTORE
+	movq	%r10, %rcx
+	popq	%r10
+	popq	%r11
+	RET
+SYM_FUNC_END(pvm_hypercall)
+.popsection
+
/*
* The new RIP value that PVM event delivery establishes is
* MSR_PVM_EVENT_ENTRY for vector events that occur in user mode.
diff --git a/arch/x86/include/asm/pvm_para.h b/arch/x86/include/asm/pvm_para.h
index bfb08f0ea293..72c74545dba6 100644
--- a/arch/x86/include/asm/pvm_para.h
+++ b/arch/x86/include/asm/pvm_para.h
@@ -87,6 +87,7 @@ static inline bool pvm_kernel_layout_relocate(void)

void entry_SYSCALL_64_pvm(void);
void pvm_user_event_entry(void);
+void pvm_hypercall(void);
void pvm_retu_rip(void);
void pvm_rets_rip(void);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/kernel/pvm.c b/arch/x86/kernel/pvm.c
index b3b4ff0bbc91..352d74394c4a 100644
--- a/arch/x86/kernel/pvm.c
+++ b/arch/x86/kernel/pvm.c
@@ -27,6 +27,52 @@ unsigned long pvm_range_end __initdata;

static bool early_traps_setup __initdata;

+static __always_inline long pvm_hypercall0(unsigned int nr)
+{
+	long ret;
+
+	asm volatile("call pvm_hypercall"
+		     : "=a"(ret)
+		     : "a"(nr)
+		     : "memory");
+	return ret;
+}
+
+static __always_inline long pvm_hypercall1(unsigned int nr, unsigned long p1)
+{
+	long ret;
+
+	asm volatile("call pvm_hypercall"
+		     : "=a"(ret)
+		     : "a"(nr), "b"(p1)
+		     : "memory");
+	return ret;
+}
+
+static __always_inline long pvm_hypercall2(unsigned int nr, unsigned long p1,
+					    unsigned long p2)
+{
+	long ret;
+
+	asm volatile("call pvm_hypercall"
+		     : "=a"(ret)
+		     : "a"(nr), "b"(p1), "c"(p2)
+		     : "memory");
+	return ret;
+}
+
+static __always_inline long pvm_hypercall3(unsigned int nr, unsigned long p1,
+					    unsigned long p2, unsigned long p3)
+{
+	long ret;
+
+	asm volatile("call pvm_hypercall"
+		     : "=a"(ret)
+		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
+		     : "memory");
+	return ret;
+}
+
void __init pvm_early_event(struct pt_regs *regs)
{
int vector = regs->orig_ax >> 32;
--
2.19.1.6.gb485710b