Commit e287bd00 authored by Paolo Bonzini

KVM: SVM: restore host save area from assembly

Allow access to the percpu area via the GS segment base, which is
needed in order to access the saved host spec_ctrl value.  In linux-next
FILL_RETURN_BUFFER also needs to access percpu data.

For simplicity, the physical address of the save area is added to struct
svm_cpu_data.

Cc: stable@vger.kernel.org
Fixes: a149180f ("x86: Add magic AMD return-thunk")
Reported-by: Nathan Chancellor <nathan@kernel.org>
Analyzed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e61ab42d
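
For orientation, a sketch that is not part of the diff below: after #VMEXIT
the CPU is still running with the guest's GSBASE, so percpu data is
unreachable until VMLOAD of the host save area restores it.  The save
area's physical address is therefore pushed before VMRUN, while percpu
access still works, and popped and vmload'ed right after.  Roughly, in C,
with hypothetical helper names standing in for the instructions:

	/* Sketch only; the real code is the vmenter.S assembly below. */
	static void svm_vcpu_run_sketch(struct vcpu_svm *svm,
					unsigned long hsave_pa)
	{
		/*
		 * hsave_pa was read from percpu data BEFORE this point,
		 * while the host GSBASE was still loaded.
		 */
		vmload(svm->vmcb01.pa);        /* load extra guest state */
		vmrun(svm->current_vmcb->pa);  /* #VMEXIT resumes here */
		vmsave(svm->vmcb01.pa);        /* stash extra guest state */
		vmload(hsave_pa);              /* restore host GSBASE etc. */
		/*
		 * Only now is percpu data usable again, e.g. the saved
		 * host spec_ctrl value and FILL_RETURN_BUFFER's data.
		 */
	}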
arch/x86/kernel/asm-offsets.c  +1 −0
@@ -18,6 +18,7 @@ static void __used common(void)
 		OFFSET(SVM_current_vmcb, vcpu_svm, current_vmcb);
 		OFFSET(SVM_vmcb01, vcpu_svm, vmcb01);
 		OFFSET(KVM_VMCB_pa, kvm_vmcb_info, pa);
+		OFFSET(SD_save_area_pa, svm_cpu_data, save_area_pa);
 	}

 	if (IS_ENABLED(CONFIG_KVM_INTEL)) {
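
For context on arch/x86/kernel/asm-offsets.c: OFFSET() makes a C struct
member's byte offset visible to assembly, and the new SD_save_area_pa
constant is what lets vmenter.S address svm_cpu_data.save_area_pa.  A
simplified sketch of the mechanism (not the literal kbuild macros):

	#include <stddef.h>

	/* Simplified stand-ins for the kernel's DEFINE()/OFFSET(). */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))
	#define OFFSET(sym, str, mem) \
		DEFINE(sym, offsetof(struct str, mem))

	/*
	 * Compiling this emits "->SD_save_area_pa <offset>" markers into
	 * the generated assembly; the build scrapes them into
	 * asm-offsets.h as "#define SD_save_area_pa <offset>", which
	 * vmenter.S consumes in PER_CPU_VAR(svm_data + SD_save_area_pa).
	 */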
arch/x86/kvm/svm/svm.c  +6 −8
@@ -592,7 +592,7 @@ static int svm_hardware_enable(void)

 	wrmsrl(MSR_EFER, efer | EFER_SVME);

-	wrmsrl(MSR_VM_HSAVE_PA, __sme_page_pa(sd->save_area));
+	wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa);

 	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
 		/*
@@ -648,6 +648,7 @@ static void svm_cpu_uninit(int cpu)

 	kfree(sd->sev_vmcbs);
 	__free_page(sd->save_area);
+	sd->save_area_pa = 0;
 	sd->save_area = NULL;
 }

@@ -665,6 +666,7 @@ static int svm_cpu_init(int cpu)
 	if (ret)
 		goto free_save_area;

+	sd->save_area_pa = __sme_page_pa(sd->save_area);
 	return 0;

 free_save_area:
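
Caching the value in svm_cpu_init() means __sme_page_pa() runs once per
CPU instead of on every use, and yields a plain unsigned long that the
assembly can push.  Assuming __sme_page_pa() behaves as elsewhere in the
kernel (physical address of the page, tagged with the SME C-bit), the
cached value amounts to this sketch:

	/* sme_me_mask is the memory-encryption bit; 0 when SME is off. */
	static inline unsigned long sme_page_pa_sketch(struct page *page)
	{
		return (page_to_pfn(page) << PAGE_SHIFT) | sme_me_mask;
	}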
@@ -1450,7 +1452,7 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 	 * Save additional host state that will be restored on VMEXIT (sev-es)
 	 * or subsequent vmload of host save area.
 	 */
-	vmsave(__sme_page_pa(sd->save_area));
+	vmsave(sd->save_area_pa);
 	if (sev_es_guest(vcpu->kvm)) {
 		struct sev_es_save_area *hostsa;
 		hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
@@ -3905,14 +3907,10 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)

 	guest_state_enter_irqoff();

-	if (sev_es_guest(vcpu->kvm)) {
+	if (sev_es_guest(vcpu->kvm))
 		__svm_sev_es_vcpu_run(svm);
-	} else {
-		struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
-
+	else
 		__svm_vcpu_run(svm);
-		vmload(__sme_page_pa(sd->save_area));
-	}

 	guest_state_exit_irqoff();
 }
arch/x86/kvm/svm/svm.h  +2 −0
@@ -287,6 +287,8 @@ struct svm_cpu_data {
 	struct kvm_ldttss_desc *tss_desc;

 	struct page *save_area;
+	unsigned long save_area_pa;
+
 	struct vmcb *current_vmcb;

 	/* index = sev_asid, value = vmcb pointer */
arch/x86/kvm/svm/svm_ops.h  +0 −5
@@ -61,9 +61,4 @@ static __always_inline void vmsave(unsigned long pa)
 	svm_asm1(vmsave, "a" (pa), "memory");
 }

-static __always_inline void vmload(unsigned long pa)
-{
-	svm_asm1(vmload, "a" (pa), "memory");
-}
-
 #endif /* __KVM_X86_SVM_OPS_H */
arch/x86/kvm/svm/vmenter.S  +17 −0
@@ -49,6 +49,14 @@ SYM_FUNC_START(__svm_vcpu_run)
 #endif
 	push %_ASM_BX

+	/*
+	 * Save variables needed after vmexit on the stack, in inverse
+	 * order compared to when they are needed.
+	 */
+
+	/* Needed to restore access to percpu variables.  */
+	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)
+
 	/* Save @svm. */
 	push %_ASM_ARG1

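
A note on the push above: PER_CPU_VAR(svm_data + SD_save_area_pa) is a
GS-relative operand naming the current CPU's svm_data, so it must be read
here, before VMRUN, while the host GSBASE is still loaded; it replaces
the per_cpu_ptr() lookup removed from svm_vcpu_enter_exit().  In sketch
form (simplified SMP expansion, for illustration only):

	/*
	 *   C (any CPU, via the percpu offset table):
	 *       pa = per_cpu_ptr(&svm_data, cpu)->save_area_pa;
	 *
	 *   asm (current CPU only, valid only under the host GSBASE):
	 *       pushq %gs:(svm_data + SD_save_area_pa)
	 */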
@@ -124,6 +132,11 @@ SYM_FUNC_START(__svm_vcpu_run)
 5:	vmsave %_ASM_AX
 6:

+	/* Restores GSBASE among other things, allowing access to percpu data.  */
+	pop %_ASM_AX
+7:	vmload %_ASM_AX
+8:
+
 #ifdef CONFIG_RETPOLINE
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
 	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
@@ -187,10 +200,14 @@ SYM_FUNC_START(__svm_vcpu_run)
 50:	cmpb $0, kvm_rebooting
 	jne 6b
 	ud2
+70:	cmpb $0, kvm_rebooting
+	jne 8b
+	ud2

 	_ASM_EXTABLE(1b, 10b)
 	_ASM_EXTABLE(3b, 30b)
 	_ASM_EXTABLE(5b, 50b)
+	_ASM_EXTABLE(7b, 70b)

 SYM_FUNC_END(__svm_vcpu_run)
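
The new 7:/70: pair mirrors the existing fault handling: _ASM_EXTABLE(7b, 70b)
records an exception-table entry so that a fault on the vmload at label 7
resumes at label 70, which tolerates the fault while KVM is shutting down
(kvm_rebooting) and otherwise traps with ud2.  In sketch form (illustrative
layout, not the literal macro expansion):

	/*
	 *   7:  vmload %rax              // can fault, e.g. during reboot
	 *   8:                           // normal continuation
	 *       ...
	 *   70: cmpb $0, kvm_rebooting   // fixup target for label 7
	 *       jne 8b                   // expected fault: resume at 8
	 *       ud2                      // unexpected: raise #UD
	 *
	 * _ASM_EXTABLE(7b, 70b) stores the (fault, fixup) pair in the
	 * __ex_table section, which the kernel's fault handlers consult.
	 */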