Commit 136a55c0 authored by Maxim Levitsky, committed by Paolo Bonzini
Browse files

KVM: x86: nSVM: refactor svm_leave_smm and smm_enter_smm



Use return statements instead of nested if, and fix error
path to free all the maps that were allocated.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20210913140954.165665-2-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e85d3e7b
Loading
Loading
Loading
Loading
+69 −66
Original line number Diff line number Diff line
@@ -4285,7 +4285,9 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
	struct kvm_host_map map_save;
	int ret;

	if (is_guest_mode(vcpu)) {
	if (!is_guest_mode(vcpu))
		return 0;

	/* FED8h - SVM Guest */
	put_smstate(u64, smstate, 0x7ed8, 1);
	/* FEE0h - SVM Guest VMCB Physical Address */
@@ -4321,7 +4323,6 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
			     &svm->vmcb01.ptr->save);

	kvm_vcpu_unmap(vcpu, &map_save, true);
	}
	return 0;
}

@@ -4329,52 +4330,54 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_host_map map, map_save;
	int ret = 0;

	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
		u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
		u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
		u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
	u64 saved_efer, vmcb12_gpa;
	struct vmcb *vmcb12;
	int ret;

	if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
		return 0;

	/* Non-zero if SMI arrived while vCPU was in guest mode. */
	if (!GET_SMSTATE(u64, smstate, 0x7ed8))
		return 0;

		if (guest) {
	if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
		return 1;

	saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
	if (!(saved_efer & EFER_SVME))
		return 1;

			if (kvm_vcpu_map(vcpu,
					 gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
	vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
		return 1;

			if (svm_allocate_nested(svm))
				return 1;
	ret = 1;
	if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
		goto unmap_map;

			kvm_vcpu_unmap(vcpu, &map, true);
	if (svm_allocate_nested(svm))
		goto unmap_save;

	/*
	 * Restore L1 host state from L1 HSAVE area as VMCB01 was
	 * used during SMM (see svm_enter_smm())
	 */
			if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
					 &map_save) == -EINVAL)
				return 1;

			svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
					     map_save.hva + 0x400);
	svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);

	/*
	 * Enter the nested guest now
	 */

	vmcb12 = map.hva;
	nested_load_control_from_vmcb12(svm, &vmcb12->control);
	ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);

unmap_save:
	kvm_vcpu_unmap(vcpu, &map_save, true);
		}
	}

unmap_map:
	kvm_vcpu_unmap(vcpu, &map, true);
	return ret;
}