[PATCH v4 17/18] KVM: arm64: Introduce hyp_dump_backtrace()

From: Kalesh Singh
Date: Fri Jul 15 2022 - 02:13:16 EST


In non-protected nVHE mode, unwind and dump the hypervisor backtrace
from EL1. This is possible because the host can directly access the
hypervisor stack pages in non-protected mode.
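
As a rough illustration of the intended host-side usage (the caller
below is hypothetical and not part of this patch; only
kvm_nvhe_dump_backtrace() and the dispatch behaviour come from the
change itself):

	#include <asm/stacktrace/nvhe.h>

	/* Hypothetical host-side (EL1) error path, for illustration only. */
	static void host_report_hyp_panic(unsigned long hyp_offset)
	{
		/*
		 * hyp_offset translates the masked hypervisor addresses back
		 * to kernel addresses; how the host computes it is outside
		 * the scope of this patch.
		 *
		 * kvm_nvhe_dump_backtrace() dispatches to hyp_dump_backtrace()
		 * in non-protected mode, or to pkvm_dump_backtrace() when
		 * protected mode is enabled.
		 */
		kvm_nvhe_dump_backtrace(hyp_offset);
	}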

Signed-off-by: Kalesh Singh <kaleshsingh@xxxxxxxxxx>
---
arch/arm64/include/asm/stacktrace/nvhe.h | 64 +++++++++++++++++++++---
1 file changed, 56 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/stacktrace/nvhe.h b/arch/arm64/include/asm/stacktrace/nvhe.h
index ec1a4ee21c21..c322ac95b256 100644
--- a/arch/arm64/include/asm/stacktrace/nvhe.h
+++ b/arch/arm64/include/asm/stacktrace/nvhe.h
@@ -190,6 +190,56 @@ static int notrace unwind_next(struct unwind_state *state)
 }
 NOKPROBE_SYMBOL(unwind_next);

+/**
+ * kvm_nvhe_print_backtrace_entry - Symbolizes and prints the HYP stack address
+ */
+static inline void kvm_nvhe_print_backtrace_entry(unsigned long addr,
+						  unsigned long hyp_offset)
+{
+	unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
+
+	/* Mask tags and convert to kern addr */
+	addr = (addr & va_mask) + hyp_offset;
+	kvm_err(" [<%016lx>] %pB\n", addr, (void *)addr);
+}
+
+/**
+ * hyp_dump_backtrace_entry - Dump an entry of the non-protected nVHE HYP stacktrace
+ *
+ * @arg : the hypervisor offset, used for address translation
+ * @where : the program counter corresponding to the stack frame
+ */
+static inline bool hyp_dump_backtrace_entry(void *arg, unsigned long where)
+{
+	kvm_nvhe_print_backtrace_entry(where, (unsigned long)arg);
+
+	return true;
+}
+
+/**
+ * hyp_dump_backtrace - Dump the non-protected nVHE HYP backtrace.
+ *
+ * @hyp_offset: hypervisor offset, used for address translation.
+ *
+ * The host can directly access HYP stack pages in non-protected
+ * mode, so the unwinding is done directly from EL1. This removes
+ * the need for shared buffers between host and hypervisor for
+ * the stacktrace.
+ */
+static inline void hyp_dump_backtrace(unsigned long hyp_offset)
+{
+	struct kvm_nvhe_stacktrace_info *stacktrace_info;
+	struct unwind_state state;
+
+	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
+
+	kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);
+
+	kvm_err("Non-protected nVHE HYP call trace:\n");
+	unwind(&state, hyp_dump_backtrace_entry, (void *)hyp_offset);
+	kvm_err("---- End of Non-protected nVHE HYP call trace ----\n");
+}
+
 #ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
 DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);

@@ -206,22 +256,18 @@ DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm
 static inline void pkvm_dump_backtrace(unsigned long hyp_offset)
 {
 	unsigned long *stacktrace_pos;
-	unsigned long va_mask, pc;

 	stacktrace_pos = (unsigned long *)this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
-	va_mask = GENMASK_ULL(vabits_actual - 1, 0);

 	kvm_err("Protected nVHE HYP call trace:\n");

-	/* The stack trace is terminated by a null entry */
-	for (; *stacktrace_pos; stacktrace_pos++) {
-		/* Mask tags and convert to kern addr */
-		pc = (*stacktrace_pos & va_mask) + hyp_offset;
-		kvm_err(" [<%016lx>] %pB\n", pc, (void *)pc);
-	}
+	/* The saved stacktrace is terminated by a null entry */
+	for (; *stacktrace_pos; stacktrace_pos++)
+		kvm_nvhe_print_backtrace_entry(*stacktrace_pos, hyp_offset);

 	kvm_err("---- End of Protected nVHE HYP call trace ----\n");
 }
+
 #else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
 static inline void pkvm_dump_backtrace(unsigned long hyp_offset)
 {
@@ -238,6 +284,8 @@ static inline void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
 {
 	if (is_protected_kvm_enabled())
 		pkvm_dump_backtrace(hyp_offset);
+	else
+		hyp_dump_backtrace(hyp_offset);
 }
 #endif /* __KVM_NVHE_HYPERVISOR__ */
 #endif /* __ASM_STACKTRACE_NVHE_H */
--
2.37.0.170.g444d1eabd0-goog