Commit 35a78319 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: SVM: Use asm goto to handle unexpected #UD on SVM instructions



Add svm_asm*() macros, a la the existing vmx_asm*() macros, to handle
faults on SVM instructions instead of using the generic __ex(), a.k.a.
__kvm_handle_fault_on_reboot().  Using asm goto generates slightly
better code as it eliminates the in-line JMP+CALL sequences that are
needed by __kvm_handle_fault_on_reboot() to avoid triggering BUG()
from fixup (which generates bad stack traces).
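For reference, at the time of this commit __kvm_handle_fault_on_reboot() in
arch/x86/include/asm/kvm_host.h wrapped an instruction roughly as below (a
sketch of the contemporary definition, quoted from memory rather than from
this diff); the JMP over the in-line CALL to kvm_spurious_fault() is the
sequence the new macros eliminate:

#define __kvm_handle_fault_on_reboot(insn)		\
	"666: \n\t"					\
	insn "\n\t"					\
	"jmp	668f \n\t"	/* skip fault path */	\
	"667: \n\t"					\
	"call	kvm_spurious_fault \n\t"		\
	"668: \n\t"					\
	_ASM_EXTABLE(666b, 667b) /* #UD resumes at 667 */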

Using SVM specific macros also drops the last user of __ex() and the
last asm linkage to kvm_spurious_fault(), and adds a helper for
VMSAVE, which may gain an additional call site in the future (as part
of optimizing the SVM context switching).
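The "asm linkage" being dropped is the direct CALL to kvm_spurious_fault()
from inline assembly; with asm goto, the fault path becomes an ordinary C
call. For context, the handler itself (defined in arch/x86/kvm/x86.c; a
sketch of the contemporary source, exact annotations may differ) simply
enforces that such faults only happen while rebooting:

asmlinkage void kvm_spurious_fault(void)
{
	/* Fault while not rebooting.  We want the trace. */
	BUG_ON(!kvm_rebooting);
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);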

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20201231002702.22237077-8-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 6a289139
arch/x86/kvm/svm/sev.c (+2 −1)
@@ -22,6 +22,7 @@

#include "x86.h"
#include "svm.h"
#include "svm_ops.h"
#include "cpuid.h"
#include "trace.h"

@@ -2076,7 +2077,7 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
	 * of which one step is to perform a VMLOAD. Since hardware does not
	 * perform a VMSAVE on VMRUN, the host savearea must be updated.
	 */
-	asm volatile(__ex("vmsave %0") : : "a" (__sme_page_pa(sd->save_area)) : "memory");
+	vmsave(__sme_page_pa(sd->save_area));

	/*
	 * Certain MSRs are restored on VMEXIT, only save ones that aren't
arch/x86/kvm/svm/svm.c (+1 −15)
@@ -41,6 +41,7 @@
#include "trace.h"

#include "svm.h"
#include "svm_ops.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

@@ -248,21 +249,6 @@ u32 svm_msrpm_offset(u32 msr)

#define MAX_INST_SIZE 15

-static inline void clgi(void)
-{
-	asm volatile (__ex("clgi"));
-}
-
-static inline void stgi(void)
-{
-	asm volatile (__ex("stgi"));
-}
-
-static inline void invlpga(unsigned long addr, u32 asid)
-{
-	asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
-}
-
static int get_max_npt_level(void)
{
#ifdef CONFIG_X86_64
arch/x86/kvm/svm/svm_ops.h (new file, +64 −0)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_SVM_OPS_H
#define __KVM_X86_SVM_OPS_H

#include <linux/compiler_types.h>

#include <asm/kvm_host.h>

#define svm_asm(insn, clobber...)				\
do {								\
	asm_volatile_goto("1: " __stringify(insn) "\n\t"	\
			  _ASM_EXTABLE(1b, %l[fault])		\
			  ::: clobber : fault);			\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

#define svm_asm1(insn, op1, clobber...)				\
do {								\
	asm_volatile_goto("1: "  __stringify(insn) " %0\n\t"	\
			  _ASM_EXTABLE(1b, %l[fault])		\
			  :: op1 : clobber : fault);		\
	return;							\
fault:								\
	kvm_spurious_fault();					\
} while (0)

#define svm_asm2(insn, op1, op2, clobber...)				\
do {									\
	asm_volatile_goto("1: "  __stringify(insn) " %1, %0\n\t"	\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  :: op1, op2 : clobber : fault);		\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

static inline void clgi(void)
{
	svm_asm(clgi);
}

static inline void stgi(void)
{
	svm_asm(stgi);
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	svm_asm2(invlpga, "c"(asid), "a"(addr));
}

/*
 * Despite being a physical address, the portion of rAX that is consumed by
 * VMSAVE, VMLOAD, etc... is still controlled by the effective address size,
 * hence 'unsigned long' instead of 'hpa_t'.
 */
static inline void vmsave(unsigned long pa)
{
	svm_asm1(vmsave, "a" (pa), "memory");
}

#endif /* __KVM_X86_SVM_OPS_H */
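
To illustrate the mechanics, here is a hand-expanded sketch (not compiler
output) of what clgi() reduces to under svm_asm(), assuming the empty
clobber list collapses as written. The unconditional return baked into the
macro is what keeps the fall-through path from reaching
kvm_spurious_fault(), which also means these macros are only usable as the
entire body of a void function:

static inline void clgi(void)
{
	/*
	 * "1:" labels the instruction so the extable entry can
	 * redirect an unexpected #UD to the C label "fault".
	 */
	asm_volatile_goto("1: clgi\n\t"
			  _ASM_EXTABLE(1b, %l[fault])
			  : : : : fault);
	return;		/* normal path: no fault occurred */
fault:
	kvm_spurious_fault();
}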