Commit 987b2594 authored by Sean Christopherson, committed by Paolo Bonzini
Browse files

KVM: x86: Move kvm_vcpu_init() invocation to common code



Move the kvm_vcpu_{un}init() calls to common x86 code as an intermediate
step to removing kvm_vcpu_{un}init() altogether.

Note, VMX's alloc_apic_access_page() and init_rmode_identity_map() are
per-VM allocations and are intentionally kept if vCPU creation fails.
They are freed by kvm_arch_destroy_vm().

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d813a8ba
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1050,7 +1050,7 @@ struct kvm_x86_ops {
	void (*vm_destroy)(struct kvm *kvm);

	/* Create, but do not attach this VCPU */
	int (*vcpu_create)(struct kvm *kvm, struct kvm_vcpu *vcpu, unsigned id);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);

+3 −10
Original line number Diff line number Diff line
@@ -2187,8 +2187,7 @@ static int avic_init_vcpu(struct vcpu_svm *svm)
	return ret;
}

static int svm_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
			   unsigned int id)
static int svm_create_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm;
	struct page *page;
@@ -2200,14 +2199,10 @@ static int svm_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
	BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
	svm = to_svm(vcpu);

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		return err;

	err = -ENOMEM;
	page = alloc_page(GFP_KERNEL_ACCOUNT);
	if (!page)
		goto uninit;
		goto out;

	msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
@@ -2256,8 +2251,7 @@ static int svm_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
	__free_page(page);
uninit:
	kvm_vcpu_uninit(vcpu);
out:
	return err;
}

@@ -2284,7 +2278,6 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	__free_page(virt_to_page(svm->nested.hsave));
	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+6 −13
Original line number Diff line number Diff line
@@ -6681,11 +6681,9 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
	free_vpid(vmx->vpid);
	nested_vmx_free_vcpu(vcpu);
	free_loaded_vmcs(vmx->loaded_vmcs);
	kvm_vcpu_uninit(vcpu);
}

static int vmx_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
			   unsigned int id)
static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx;
	unsigned long *msr_bitmap;
@@ -6694,10 +6692,6 @@ static int vmx_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
	BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
	vmx = to_vmx(vcpu);

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		return err;

	err = -ENOMEM;

	vmx->vpid = allocate_vpid();
@@ -6711,7 +6705,7 @@ static int vmx_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
	if (enable_pml) {
		vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
		if (!vmx->pml_pg)
			goto uninit_vcpu;
			goto free_vpid;
	}

	BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) != NR_SHARED_MSRS);
@@ -6756,7 +6750,7 @@ static int vmx_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
	vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
	if (kvm_cstate_in_guest(kvm)) {
	if (kvm_cstate_in_guest(vcpu->kvm)) {
		vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C1_RES, MSR_TYPE_R);
		vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C3_RESIDENCY, MSR_TYPE_R);
		vmx_disable_intercept_for_msr(msr_bitmap, MSR_CORE_C6_RESIDENCY, MSR_TYPE_R);
@@ -6772,13 +6766,13 @@ static int vmx_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
	vmx_vcpu_put(vcpu);
	put_cpu();
	if (cpu_need_virtualize_apic_accesses(vcpu)) {
		err = alloc_apic_access_page(kvm);
		err = alloc_apic_access_page(vcpu->kvm);
		if (err)
			goto free_vmcs;
	}

	if (enable_ept && !enable_unrestricted_guest) {
		err = init_rmode_identity_map(kvm);
		err = init_rmode_identity_map(vcpu->kvm);
		if (err)
			goto free_vmcs;
	}
@@ -6810,8 +6804,7 @@ static int vmx_create_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu,
	free_loaded_vmcs(vmx->loaded_vmcs);
free_pml:
	vmx_destroy_pml_buffer(vmx);
uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_vpid:
	free_vpid(vmx->vpid);
	return err;
}
+15 −5
Original line number Diff line number Diff line
@@ -9176,6 +9176,8 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)

	kvm_x86_ops->vcpu_free(vcpu);

	kvm_vcpu_uninit(vcpu);

	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
	kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
	kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
@@ -9197,13 +9199,21 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
	if (!vcpu)
		return ERR_PTR(-ENOMEM);

	r = kvm_x86_ops->vcpu_create(kvm, vcpu, id);
	if (r) {
	r = kvm_vcpu_init(vcpu, kvm, id);
	if (r)
		goto free_vcpu;

	r = kvm_x86_ops->vcpu_create(vcpu);
	if (r)
		goto uninit_vcpu;
	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
	return ERR_PTR(r);
}
	return vcpu;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{