Commit 0e773da1 authored by Marc Zyngier's avatar Marc Zyngier
Browse files

KVM: arm64: Move nVHE-only helpers into kvm/stacktrace.c



kvm_nvhe_stack_kern_va() only makes sense as part of the nVHE
unwinder, so simply move it there.

Signed-off-by: default avatarMarc Zyngier <maz@kernel.org>
Reviewed-by: default avatarKalesh Singh <kaleshsingh@google.com>
Tested-by: default avatarKalesh Singh <kaleshsingh@google.com>
Reviewed-by: default avatarOliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20220727142906.1856759-5-maz@kernel.org
parent 4e00532f
Loading
Loading
Loading
Loading
+0 −41
Original line number Diff line number Diff line
@@ -49,47 +49,6 @@ DECLARE_KVM_NVHE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overf
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
DECLARE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);

/*
 * kvm_nvhe_stack_kern_va - Convert a KVM nVHE HYP stack address to a kernel VA
 *
 * The nVHE hypervisor stack is mapped in the flexible 'private' VA range, to
 * allow for guard pages below the stack. Consequently, the fixed offset address
 * translation macros won't work here.
 *
 * The kernel VA is calculated as an offset from the kernel VA of the hypervisor
 * stack base: the address keeps the same offset from its stack base in both
 * address spaces.
 *
 * Returns true on success and updates @addr to its corresponding kernel VA;
 * otherwise (unrecognised stack type) returns false and leaves @addr untouched.
 */
static inline bool kvm_nvhe_stack_kern_va(unsigned long *addr,
					  enum stack_type type)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info;
	unsigned long hyp_base, kern_base, hyp_offset;

	/* Per-CPU stack layout information shared by the hypervisor. */
	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);

	switch (type) {
	case STACK_TYPE_HYP:
		/* Regular hyp stack: backed by the per-CPU hyp stack page. */
		kern_base = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
		hyp_base = (unsigned long)stacktrace_info->stack_base;
		break;
	case STACK_TYPE_OVERFLOW:
		/* Overflow stack: a per-CPU array declared on the kernel side. */
		kern_base = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
		hyp_base = (unsigned long)stacktrace_info->overflow_stack_base;
		break;
	default:
		return false;
	}

	/* Rebase: preserve the offset from the stack base across spaces. */
	hyp_offset = *addr - hyp_base;

	*addr = kern_base + hyp_offset;

	return true;
}

void kvm_nvhe_dump_backtrace(unsigned long hyp_offset);

#endif	/* __KVM_NVHE_HYPERVISOR__ */
+41 −0
Original line number Diff line number Diff line
@@ -21,6 +21,47 @@

#include <asm/stacktrace/nvhe.h>

/*
 * kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to a kernel VAs
 *
 * The nVHE hypervisor stack is mapped in the flexible 'private' VA range, to
 * allow for guard pages below the stack. Consequently, the fixed offset address
 * translation macros won't work here.
 *
 * The kernel VA is calculated as an offset from the kernel VA of the hypervisor
 * stack base.
 *
 * Returns true on success and updates @addr to its corresponding kernel VA;
 * otherwise returns false.
 */
static bool kvm_nvhe_stack_kern_va(unsigned long *addr,
				   enum stack_type type)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info;
	unsigned long hyp_base, kern_base, hyp_offset;

	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);

	switch (type) {
	case STACK_TYPE_HYP:
		kern_base = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
		hyp_base = (unsigned long)stacktrace_info->stack_base;
		break;
	case STACK_TYPE_OVERFLOW:
		kern_base = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
		hyp_base = (unsigned long)stacktrace_info->overflow_stack_base;
		break;
	default:
		return false;
	}

	hyp_offset = *addr - hyp_base;

	*addr = kern_base + hyp_offset;

	return true;
}

static bool on_overflow_stack(unsigned long sp, unsigned long size,
			      struct stack_info *info)
{