Commit d8969871 authored by Maciej S. Szmigiero, committed by Paolo Bonzini
Browse files

KVM: selftests: nSVM: Add svm_nested_soft_inject_test



Add a KVM self-test that checks whether an nSVM L1 is able to successfully
inject a software interrupt, a soft exception and an NMI into its L2 guest.

In practice, this tests both the next_rip field consistency and
L1-injected event with intervening L0 VMEXIT during its delivery:
the first nested VMRUN (that's also trying to inject a software interrupt)
will immediately trigger a L0 NPF.
This L0 NPF will have zero in its CPU-returned next_rip field, which if
incorrectly reused by KVM will trigger a #PF when trying to return to
such address 0 from the interrupt handler.

For NMI injection this tests whether the L1 NMI state isn't getting
incorrectly mixed with the L2 NMI state if a L1 -> L2 NMI needs to be
re-injected.

Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
[sean: check exact L2 RIP on first soft interrupt]
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Maciej S. Szmigiero <maciej.szmigiero@oracle.com>
Message-Id: <d5f3d56528558ad8e28a9f1e1e4187f5a1e6770a.1651440202.git.maciej.szmigiero@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 159fc6fa
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -36,9 +36,10 @@
/x86_64/state_test
/x86_64/svm_vmcall_test
/x86_64/svm_int_ctl_test
/x86_64/tsc_scaling_sync
/x86_64/svm_nested_soft_inject_test
/x86_64/sync_regs_test
/x86_64/tsc_msrs_test
/x86_64/tsc_scaling_sync
/x86_64/userspace_io_test
/x86_64/userspace_msr_exit_test
/x86_64/vmx_apic_access_test
+1 −0
Original line number Diff line number Diff line
@@ -66,6 +66,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_nested_soft_inject_test
TEST_GEN_PROGS_x86_64 += x86_64/tsc_scaling_sync
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
TEST_GEN_PROGS_x86_64 += x86_64/userspace_io_test
+17 −0
Original line number Diff line number Diff line
@@ -17,6 +17,8 @@

#include "../kvm_util.h"

#define NMI_VECTOR		0x02

#define X86_EFLAGS_FIXED	 (1u << 1)

#define X86_CR4_VME		(1ul << 0)
@@ -385,6 +387,21 @@ static inline void cpu_relax(void)
	asm volatile("rep; nop" ::: "memory");
}

#define vmmcall()		\
	__asm__ __volatile__(	\
		"vmmcall\n"	\
		)

#define ud2()			\
	__asm__ __volatile__(	\
		"ud2\n"	\
		)

#define hlt()			\
	__asm__ __volatile__(	\
		"hlt\n"	\
		)

bool is_intel_cpu(void);
bool is_amd_cpu(void);

+12 −0
Original line number Diff line number Diff line
@@ -16,6 +16,8 @@
#define CPUID_SVM_BIT		2
#define CPUID_SVM		BIT_ULL(CPUID_SVM_BIT)

#define SVM_EXIT_EXCP_BASE	0x040
#define SVM_EXIT_HLT		0x078
#define SVM_EXIT_MSR		0x07c
#define SVM_EXIT_VMMCALL	0x081

@@ -36,6 +38,16 @@ struct svm_test_data {
	uint64_t msr_gpa;
};

#define stgi()			\
	__asm__ __volatile__(	\
		"stgi\n"	\
		)

#define clgi()			\
	__asm__ __volatile__(	\
		"clgi\n"	\
		)

struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
+0 −1
Original line number Diff line number Diff line
@@ -19,7 +19,6 @@
#include "vmx.h"

#define VCPU_ID		5
#define NMI_VECTOR	2

static int ud_count;

Loading