Commit e61ab42d authored by Paolo Bonzini

KVM: SVM: move guest vmsave/vmload back to assembly

It is error-prone that code after vmexit cannot access percpu data
because GSBASE has not been restored yet.  It forces MSR_IA32_SPEC_CTRL
save/restore to happen very late, after the predictor untraining
sequence, and it gets in the way of return stack depth tracking
(a retbleed mitigation that is in linux-next as of 2022-11-09).
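To make the hazard concrete, here is a toy userspace model (illustrative only, not kernel code): percpu accesses compile to %gs-relative loads, so between VMRUN's return and the restore of the host GSBASE any percpu read goes through a base the guest controlled. The explicit "gsbase" pointer below stands in for the real GSBASE register, and host_area/guest_area for the host's and guest's percpu regions.

/* Toy model only: an explicit base pointer stands in for GSBASE. */
#include <stdio.h>

struct percpu_data { int spec_ctrl; };

static struct percpu_data host_area  = { .spec_ctrl = 1 };
static struct percpu_data guest_area = { .spec_ctrl = 0 };

/* All "percpu" reads go through this base, as %gs-relative loads do. */
static struct percpu_data *gsbase = &host_area;

static int this_cpu_read_spec_ctrl(void) { return gsbase->spec_ctrl; }

int main(void)
{
	gsbase = &guest_area;              /* VMRUN switched to guest state */
	int v = this_cpu_read_spec_ctrl(); /* too early: guest's base! */
	gsbase = &host_area;               /* GSBASE restored only here */
	printf("read %d, host value is %d\n", v, host_area.spec_ctrl);
	return v != host_area.spec_ctrl;   /* exits 1: the read was stale */
}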

As a first step towards fixing that, move the VMCB VMSAVE/VMLOAD to
assembly, essentially undoing commit fb0c4a4f ("KVM: SVM: move
VMLOAD/VMSAVE to C code", 2021-03-15).  The reason for that commit was
that it made it simpler to use a different VMCB for VMLOAD/VMSAVE versus
VMRUN; but that is not a big hassle anymore thanks to the kvm-asm-offsets
machinery and other related cleanups.
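For readers unfamiliar with that machinery, the sketch below models how an asm-offsets file turns C struct layouts into assembler constants; the struct definitions here are simplified stand-ins, not the real KVM ones. The file is compiled with -S and the build scans the emitted "->NAME value" markers into #define lines that .S files can include, which is what lets the assembly below reference SVM_vmcb01 and KVM_VMCB_pa.

#include <stddef.h>

/* Simplified stand-ins for the real KVM structures. */
struct kvm_vmcb_info { void *ptr; unsigned long pa; };
struct vcpu_svm { void *pad; struct kvm_vmcb_info vmcb01; };

/* Emit "->SYM value" markers into the generated assembly; the build
 * turns each one into "#define SYM value" for use from .S files. */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

void common(void)
{
	OFFSET(SVM_vmcb01, vcpu_svm, vmcb01);
	OFFSET(KVM_VMCB_pa, kvm_vmcb_info, pa);
}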

The idea on how to number the exception tables is stolen from
a prototype patch by Peter Zijlstra.
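The convention, as it appears in the vmenter.S hunks below: each faulting instruction gets an odd local label n with its resume point at n+1, and the matching fixup code is numbered n * 10, so every label stays unique and the fault-to-fixup pairing is readable at a glance. A toy C rendering of the resulting table (illustrative only; the real table is built by the _ASM_EXTABLE() assembler macro):

struct extable_pair { int fault_label, fixup_label, resume_label; };

static const struct extable_pair svm_run_extable[] = {
	{ 1, 10, 2 },	/* vmload faults -> handler 10, resumes at 2 */
	{ 3, 30, 4 },	/* vmrun  faults -> handler 30, resumes at 4 */
	{ 5, 50, 6 },	/* vmsave faults -> handler 50, resumes at 6 */
};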

Cc: stable@vger.kernel.org
Fixes: a149180f ("x86: Add magic AMD return-thunk")
Link: https://lore.kernel.org/all/f571e404-e625-bae1-10e9-449b2eb4cbd8@citrix.com/
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 73412dfe
arch/x86/kvm/kvm-asm-offsets.c (+1 −0)
@@ -16,6 +16,7 @@ static void __used common(void)
 	BLANK();
 	OFFSET(SVM_vcpu_arch_regs, vcpu_svm, vcpu.arch.regs);
 	OFFSET(SVM_current_vmcb, vcpu_svm, current_vmcb);
+	OFFSET(SVM_vmcb01, vcpu_svm, vmcb01);
 	OFFSET(KVM_VMCB_pa, kvm_vmcb_info, pa);
 }

arch/x86/kvm/svm/svm.c (+0 −9)
@@ -3910,16 +3910,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 	} else {
 		struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
 
-		/*
-		 * Use a single vmcb (vmcb01 because it's always valid) for
-		 * context switching guest state via VMLOAD/VMSAVE, that way
-		 * the state doesn't need to be copied between vmcb01 and
-		 * vmcb02 when switching vmcbs for nested virtualization.
-		 */
-		vmload(svm->vmcb01.pa);
 		__svm_vcpu_run(svm);
-		vmsave(svm->vmcb01.pa);
-
 		vmload(__sme_page_pa(sd->save_area));
 	}

arch/x86/kvm/svm/vmenter.S (+38 −11)
@@ -28,6 +28,8 @@
 #define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
 #endif
 
+#define SVM_vmcb01_pa	(SVM_vmcb01 + KVM_VMCB_pa)
+
 .section .noinstr.text, "ax"

/**
@@ -55,6 +57,16 @@ SYM_FUNC_START(__svm_vcpu_run)
 	mov %_ASM_ARG1, %_ASM_DI
 .endif
 
+	/*
+	 * Use a single vmcb (vmcb01 because it's always valid) for
+	 * context switching guest state via VMLOAD/VMSAVE, that way
+	 * the state doesn't need to be copied between vmcb01 and
+	 * vmcb02 when switching vmcbs for nested virtualization.
+	 */
+	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
+1:	vmload %_ASM_AX
+2:
+
 	/* Get svm->current_vmcb->pa into RAX. */
 	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
 	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
@@ -80,16 +92,11 @@ SYM_FUNC_START(__svm_vcpu_run)
 	/* Enter guest mode */
 	sti
 
-1:	vmrun %_ASM_AX
-
-2:	cli
-
-#ifdef CONFIG_RETPOLINE
-	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
-	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
-#endif
+3:	vmrun %_ASM_AX
+4:
+	cli
 
-	/* "POP" @svm to RAX. */
+	/* Pop @svm to RAX while it's the only available register. */
 	pop %_ASM_AX

	/* Save all guest registers.  */
@@ -110,6 +117,18 @@ SYM_FUNC_START(__svm_vcpu_run)
 	mov %r15, VCPU_R15(%_ASM_AX)
 #endif
 
+	/* @svm can stay in RDI from now on.  */
+	mov %_ASM_AX, %_ASM_DI
+
+	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
+5:	vmsave %_ASM_AX
+6:
+
+#ifdef CONFIG_RETPOLINE
+	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
+	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+#endif
+
 	/*
 	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
 	 * untrained as soon as we exit the VM and are back to the
@@ -159,11 +178,19 @@ SYM_FUNC_START(__svm_vcpu_run)
 	pop %_ASM_BP
 	RET
 
-3:	cmpb $0, kvm_rebooting
+10:	cmpb $0, kvm_rebooting
 	jne 2b
 	ud2
+30:	cmpb $0, kvm_rebooting
+	jne 4b
+	ud2
+50:	cmpb $0, kvm_rebooting
+	jne 6b
+	ud2
 
-	_ASM_EXTABLE(1b, 3b)
+	_ASM_EXTABLE(1b, 10b)
+	_ASM_EXTABLE(3b, 30b)
+	_ASM_EXTABLE(5b, 50b)
 
 SYM_FUNC_END(__svm_vcpu_run)