Commit 16809ecd authored by Tom Lendacky, committed by Paolo Bonzini

KVM: SVM: Provide an updated VMRUN invocation for SEV-ES guests



The run sequence is different for an SEV-ES guest compared to a legacy or
even an SEV guest. The guest vCPU register state of an SEV-ES guest will
be restored on VMRUN and saved on VMEXIT. There is therefore no need to
restore the guest registers, either directly or via VMLOAD, before VMRUN,
and no need to save them, either directly or via VMSAVE, on VMEXIT.

Update the svm_vcpu_run() function to skip register state saving and
restoring, and provide an alternative function in vmenter.S for running
an SEV-ES guest, as sketched below.
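
The resulting dispatch in svm_vcpu_enter_exit() reduces to the following
sketch (condensed from the svm.c hunk below; tracing and host segment
restoration omitted):

	if (sev_es_guest(svm->vcpu.kvm)) {
		/* Hardware restores/saves guest register state via the VMSA. */
		__svm_sev_es_vcpu_run(svm->vmcb_pa);
	} else {
		/* Legacy path: KVM hand-loads GPRs around VMRUN. */
		__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
	}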

Additionally, certain host state is restored automatically across an SEV-ES
VMRUN/VMEXIT round trip. As a result, some host register state (e.g. FS and
GS) does not need to be restored by KVM on VMEXIT, so do that only when the
guest is not an SEV-ES guest.
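
Concretely, each host-state restoration site gains a guard, condensed here
from the hunks below (the x86.c paths key off vcpu->arch.guest_state_protected,
which is set for SEV-ES guests):

	/* svm.c: skip the TSS reload and VMCB register sync for SEV-ES */
	if (!sev_es_guest(svm->vcpu.kvm))
		reload_tss(vcpu);

	/* x86.c: skip XSAVE state swapping when guest state is protected */
	if (vcpu->arch.guest_state_protected)
		return;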

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Message-Id: <fb1c66d32f2194e171b95fc1a8affd6d326e10c1.1607620209.git.thomas.lendacky@amd.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 86137773
arch/x86/kvm/svm/svm.c +16 −9
@@ -3700,6 +3700,9 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
	guest_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);

	if (sev_es_guest(svm->vcpu.kvm)) {
		__svm_sev_es_vcpu_run(svm->vmcb_pa);
	} else {
		__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);

#ifdef CONFIG_X86_64
@@ -3710,6 +3713,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
		loadsegment(gs, svm->host.gs);
#endif
#endif
	}

	/*
	 * VMEXIT disables interrupts (host state), but tracing and lockdep
@@ -3807,14 +3811,17 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

	if (!sev_es_guest(svm->vcpu.kvm))
		reload_tss(vcpu);

	x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);

	if (!sev_es_guest(svm->vcpu.kvm)) {
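	/*
	 * For an SEV-ES guest there is nothing to sync here: its register
	 * state lives in the encrypted VMSA and is saved and restored by
	 * hardware on VMEXIT and VMRUN.
	 */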
		vcpu->arch.cr2 = svm->vmcb->save.cr2;
		vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
		vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
		vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
	}

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_before_interrupt(&svm->vcpu);
arch/x86/kvm/svm/svm.h +5 −0
@@ -591,4 +591,9 @@ void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
void sev_es_vcpu_put(struct vcpu_svm *svm);

/* vmenter.S */

void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

#endif
arch/x86/kvm/svm/vmenter.S +50 −0
@@ -168,3 +168,53 @@ SYM_FUNC_START(__svm_vcpu_run)
	pop %_ASM_BP
	ret
SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run an SEV-ES vCPU via a transition to SVM guest mode
 * @vmcb_pa:	physical address of the VMCB (unsigned long)
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Enter guest mode */
	mov %_ASM_ARG1, %_ASM_AX
	sti

1:	vmrun %_ASM_AX
	jmp 3f
2:	cmpb $0, kvm_rebooting
	jne 3f
	ud2
	_ASM_EXTABLE(1b, 2b)
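	/*
	 * The exception table entry above redirects a faulting VMRUN to
	 * label 2: the fault is tolerated only while KVM is shutting down
	 * (kvm_rebooting != 0); any other fault is unexpected and hits ud2.
	 */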

3:	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	ret
SYM_FUNC_END(__svm_sev_es_vcpu_run)
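
Unlike __svm_vcpu_run(), this routine neither loads guest GPRs before VMRUN
nor stores them after VMEXIT: for an SEV-ES guest the hardware transfers that
state through the VMSA, so only the callee-saved host registers are pushed
and popped around the guest entry.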
arch/x86/kvm/x86.c +6 −0
@@ -880,6 +880,9 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.guest_state_protected)
		return;

	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {

		if (vcpu->arch.xcr0 != host_xcr0)
@@ -900,6 +903,9 @@ EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);

void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.guest_state_protected)
		return;

	if (static_cpu_has(X86_FEATURE_PKU) &&
	    (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
	     (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) {
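
For an SEV-ES guest, vcpu->arch.guest_state_protected is set once the guest
register state has been encrypted into the VMSA (done elsewhere in this
series), so both xsave-state helpers become no-ops: that state is part of the
protected guest state and is swapped by hardware rather than by KVM.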