On Fri, Oct 08 2021 at 22:37, Kuppuswamy Sathyanarayanan wrote:
Please do not use tail comments and with a tab between type and name
+/*
+ * Used by #VE exception handler to gather the #VE exception
+ * info from the TDX module. This is software only structure
+ * and not related to TDX module/VMM.
+ */
+struct ve_info {
+ u64 exit_reason;
+ u64 exit_qual;
+ u64 gla; /* Guest Linear (virtual) Address */
+ u64 gpa; /* Guest Physical (virtual) Address */
this becomes more readable:
/* Guest Linear (virtual) Address */
u64 gla;
/* Guest Physical (virtual) Address */
u64 gpa;
Hmm?
+/*
+ * tdx_get_ve_info() - Retrieve the #VE info from the TDX module
+ *
+ * Called by the #VE exception handler to gather the #VE exit
+ * information via the TDGETVEINFO TDCALL.
+ *
+ * Return: true if @ve was filled in, false if @ve is NULL or the
+ * TDCALL failed.
+ */
+bool tdx_get_ve_info(struct ve_info *ve)
+{
+	struct tdx_module_output out;
+
+	/* A NULL @ve is a caller bug; warn once and bail out. */
+	if (WARN_ON_ONCE(!ve))
+		return false;
+
+	/*
+	 * NMIs and machine checks are suppressed. Before this point any
+	 * #VE is fatal. After this point (TDGETVEINFO call), NMIs and
+	 * additional #VEs are permitted (but it is expected not to
+	 * happen unless kernel panics).
+	 */
+	if (__tdx_module_call(TDX_GET_VEINFO, 0, 0, 0, 0, &out))
+		return false;
+
+	/* Transfer the #VE info from the TDX module output registers. */
+	ve->exit_reason = out.rcx;
+	ve->exit_qual = out.rdx;
+	ve->gla = out.r8;
+	ve->gpa = out.r9;
+	ve->instr_len = out.r10 & UINT_MAX;
+	ve->instr_info = out.r10 >> 32;
+
+	return true;
+}
+
+/*
+ * tdx_handle_virtualization_exception() - Handle a #VE exception
+ *
+ * No specific #VE exit reasons are handled yet: log the exit reason
+ * and return false so the #VE entry point can escalate.
+ *
+ * Return: false in all cases for now (unhandled).
+ */
+bool tdx_handle_virtualization_exception(struct pt_regs *regs,
+					 struct ve_info *ve)
+{
+	/* exit_reason is u64; use the unsigned format specifier. */
+	pr_warn("Unexpected #VE: %llu\n", ve->exit_reason);
+	return false;
+}
+
void __init tdx_early_init(void)
{
if (!is_tdx_guest())
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a58800973aed..70d76c3a548f 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -61,6 +61,7 @@
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/vdso.h>
+#include <asm/tdx.h>
#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
@@ -1140,6 +1141,82 @@ DEFINE_IDTENTRY(exc_device_not_available)
}
}
+#ifdef CONFIG_INTEL_TDX_GUEST
+#define VE_FAULT_STR "VE fault"
+static void ve_raise_fault(struct pt_regs *regs, long error_code)
Please do not glue the #define and the function definition
together. Newlines exist for a reason.
+{
Expected?
+ struct task_struct *tsk = current;
+
+ if (user_mode(regs)) {
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_nr = X86_TRAP_VE;
+
+ /*
+ * Not fixing up VDSO exceptions similar to #GP handler
+ * because it is expected that VDSO doesn't trigger #VE.
+	 */
if (!preemptible() && kprobe_running() &&
+ show_signal(tsk, SIGSEGV, "", VE_FAULT_STR, regs, error_code);
+ force_sig(SIGSEGV);
+ return;
+ }
+
+ /*
+ * Attempt to recover from #VE exception failure without
+ * triggering OOPS (useful for MSR read/write failures)
+ */
+ if (fixup_exception(regs, X86_TRAP_VE, error_code, 0))
+ return;
+
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_nr = X86_TRAP_VE;
+
+ /*
+ * To be potentially processing a kprobe fault and to trust the result
+ * from kprobe_running(), it should be non-preemptible.
+ */
+ if (!preemptible() &&
+ kprobe_running() &&
+	    kprobe_fault_handler(regs, X86_TRAP_VE))
perhaps?
Please remove that. The idtentry code is already taking care of that.
+
+DEFINE_IDTENTRY(exc_virtualization_exception)
+{
+ struct ve_info ve;
+ bool ret;
+
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
+	/*
+	 * NMIs/Machine-checks/Interrupts will be in a disabled state
+	 * till TDGETVEINFO TDCALL is executed. This prevents #VE
+	 * nesting issue.
s/This prevents.../This ensures that VE info cannot be overwritten by a
nested #VE/
Or something like that perhaps?
Also a some comment about #VE in general above the DEFINE_IDTENTRY()
would be appreciated.
Thanks,
tglx