[PATCH v5 17/17] KVM: arm64: Introduce hyp_dump_backtrace()

From: Kalesh Singh
Date: Thu Jul 21 2022 - 01:58:45 EST


In non-protected nVHE mode, unwind and dump the hypervisor backtrace
from EL1. This is possible because the host can directly access the
hypervisor stack pages in non-protected mode.

Signed-off-by: Kalesh Singh <kaleshsingh@xxxxxxxxxx>
---

Changes in v5:
- Move code out from nvhe.h header to handle_exit.c, per Marc
- Fix stacktrace symbolization when CONFIG_RANDOMIZE_BASE is enabled,
per Fuad
- Use regular comments instead of doc comments, per Fuad
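
As the commit message notes, host-side unwinding only needs read access
to the hypervisor stack pages plus the saved fp/pc: the unwinder then
follows ordinary AAPCS64 frame records. A minimal sketch of such a walk,
for illustration only (the names below are made up and are not part of
this patch):

struct frame_record {
	unsigned long fp;	/* caller's frame record */
	unsigned long lr;	/* return address for this frame */
};

static void walk_frame_records(unsigned long fp,
			       bool (*fp_is_valid)(unsigned long fp),
			       void (*consume_pc)(unsigned long pc))
{
	/* Each AAPCS64 frame record is an {fp, lr} pair on the stack */
	while (fp_is_valid(fp)) {
		struct frame_record *frame = (struct frame_record *)fp;

		consume_pc(frame->lr);	/* report the return address */
		fp = frame->fp;		/* step to the caller's frame */
	}
}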

arch/arm64/kvm/handle_exit.c | 65 +++++++++++++++++++++++++++++++-----
1 file changed, 56 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index ad568da5c7d7..432b6b26f4ad 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -17,6 +17,7 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/debug-monitors.h>
+#include <asm/stacktrace/nvhe.h>
#include <asm/traps.h>

#include <kvm/arm_hypercalls.h>
@@ -318,6 +319,56 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}

+/*
+ * kvm_nvhe_print_backtrace_entry - Symbolize and print a HYP stacktrace address
+ */
+static void kvm_nvhe_print_backtrace_entry(unsigned long addr,
+ unsigned long hyp_offset)
+{
+ unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
+
+ /* Mask tags and convert to kern addr */
+ addr = (addr & va_mask) + hyp_offset;
+ kvm_err(" [<%016lx>] %pB\n", addr, (void *)(addr + kaslr_offset()));
+}
+
+/*
+ * hyp_dump_backtrace_entry - Dump an entry of the non-protected nVHE HYP stacktrace
+ *
+ * @arg : the hypervisor offset, used for address translation
+ * @where : the program counter corresponding to the stack frame
+ */
+static bool hyp_dump_backtrace_entry(void *arg, unsigned long where)
+{
+ kvm_nvhe_print_backtrace_entry(where, (unsigned long)arg);
+
+ return true;
+}
+
+/*
+ * hyp_dump_backtrace - Dump the non-protected nVHE HYP backtrace.
+ *
+ * @hyp_offset: hypervisor offset, used for address translation.
+ *
+ * The host can directly access HYP stack pages in non-protected
+ * mode, so the unwinding is done directly from EL1. This removes
+ * the need for shared buffers between host and hypervisor for
+ * the stacktrace.
+ */
+static void hyp_dump_backtrace(unsigned long hyp_offset)
+{
+ struct kvm_nvhe_stacktrace_info *stacktrace_info;
+ struct unwind_state state;
+
+ stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
+
+ kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);
+
+ kvm_err("Non-protected nVHE HYP call trace:\n");
+ unwind(&state, hyp_dump_backtrace_entry, (void *)hyp_offset);
+ kvm_err("---- End of Non-protected nVHE HYP call trace ----\n");
+}
+
#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
pkvm_stacktrace);
@@ -336,18 +387,12 @@ static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
unsigned long *stacktrace_entry
= (unsigned long *)this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
- unsigned long va_mask, pc;
-
- va_mask = GENMASK_ULL(vabits_actual - 1, 0);

kvm_err("Protected nVHE HYP call trace:\n");

- /* The stack trace is terminated by a null entry */
- for (; *stacktrace_entry; stacktrace_entry++) {
- /* Mask tags and convert to kern addr */
- pc = (*stacktrace_entry & va_mask) + hyp_offset;
- kvm_err(" [<%016lx>] %pB\n", pc, (void *)(pc + kaslr_offset()));
- }
+ /* The saved stacktrace is terminated by a null entry */
+ for (; *stacktrace_entry; stacktrace_entry++)
+ kvm_nvhe_print_backtrace_entry(*stacktrace_entry, hyp_offset);

kvm_err("---- End of Protected nVHE HYP call trace ----\n");
}
@@ -367,6 +412,8 @@ static void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
{
if (is_protected_kvm_enabled())
pkvm_dump_backtrace(hyp_offset);
+ else
+ hyp_dump_backtrace(hyp_offset);
}

void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
--
2.37.0.170.g444d1eabd0-goog