Commit 63129754 authored by Paolo Bonzini

KVM: SVM: Pass struct kvm_vcpu to exit handlers (and many, many other places)



Refactor the svm_exit_handlers API to pass @vcpu instead of @svm to
allow directly invoking common x86 exit handlers (in a future patch).
Opportunistically convert an absurd number of instances of 'svm->vcpu'
to direct uses of 'vcpu' to avoid pointless casting.

No functional change intended.
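
The conversion is mechanical: each exit handler now takes the generic
struct kvm_vcpu and recovers the SVM-specific container itself via
to_svm().  A minimal sketch of the resulting handler shape (the handler
name is hypothetical; the pattern is the one applied in the hunks
below):

	static int example_intercept_handler(struct kvm_vcpu *vcpu)
	{
		struct vcpu_svm *svm = to_svm(vcpu);

		/* SVM-specific state is still reached through svm... */
		if (svm->vmcb->control.exit_info_1)
			return 1;

		/* ...while common x86 helpers take vcpu directly. */
		return kvm_skip_emulated_instruction(vcpu);
	}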

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210205005750.3841462-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 2a32a77c
arch/x86/kvm/svm/avic.c +13 −11
@@ -270,7 +270,7 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
	if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
		return -EINVAL;

-	if (!svm->vcpu.arch.apic->regs)
+	if (!vcpu->arch.apic->regs)
		return -EINVAL;

	if (kvm_apicv_activated(vcpu->kvm)) {
@@ -281,7 +281,7 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
			return ret;
	}

-	svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);
+	svm->avic_backing_page = virt_to_page(vcpu->arch.apic->regs);

	/* Setting AVIC backing page address in the phy APIC ID table */
	entry = avic_get_physical_id_entry(vcpu, id);
@@ -315,15 +315,16 @@ static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
	}
}

-int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
+int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
{
+	struct vcpu_svm *svm = to_svm(vcpu);
	u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
	u32 icrl = svm->vmcb->control.exit_info_1;
	u32 id = svm->vmcb->control.exit_info_2 >> 32;
	u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
-	struct kvm_lapic *apic = svm->vcpu.arch.apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;

-	trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);
+	trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index);

	switch (id) {
	case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
@@ -347,11 +348,11 @@ int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
		 * set the appropriate IRR bits on the valid target
		 * vcpus. So, we just need to kick the appropriate vcpu.
		 */
-		avic_kick_target_vcpus(svm->vcpu.kvm, apic, icrl, icrh);
+		avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh);
		break;
	case AVIC_IPI_FAILURE_INVALID_TARGET:
		WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
-			  index, svm->vcpu.vcpu_id, icrh, icrl);
+			  index, vcpu->vcpu_id, icrh, icrl);
		break;
	case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
		WARN_ONCE(1, "Invalid backing page\n");
@@ -539,8 +540,9 @@ static bool is_avic_unaccelerated_access_trap(u32 offset)
	return ret;
}

-int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
+int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu)
{
+	struct vcpu_svm *svm = to_svm(vcpu);
	int ret = 0;
	u32 offset = svm->vmcb->control.exit_info_1 &
		     AVIC_UNACCEL_ACCESS_OFFSET_MASK;
@@ -550,7 +552,7 @@ int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
		     AVIC_UNACCEL_ACCESS_WRITE_MASK;
	bool trap = is_avic_unaccelerated_access_trap(offset);

-	trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
+	trace_kvm_avic_unaccelerated_access(vcpu->vcpu_id, offset,
					    trap, write, vector);
	if (trap) {
		/* Handling Trap */
@@ -558,7 +560,7 @@ int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
		ret = avic_unaccel_trap_write(svm);
	} else {
		/* Handling Fault */
-		ret = kvm_emulate_instruction(&svm->vcpu, 0);
+		ret = kvm_emulate_instruction(vcpu, 0);
	}

	return ret;
@@ -572,7 +574,7 @@ int avic_init_vcpu(struct vcpu_svm *svm)
	if (!avic || !irqchip_in_kernel(vcpu->kvm))
		return 0;

-	ret = avic_init_backing_page(&svm->vcpu);
+	ret = avic_init_backing_page(vcpu);
	if (ret)
		return ret;

arch/x86/kvm/svm/nested.c +65 −63
@@ -247,11 +247,9 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
	return true;
}

-static bool nested_vmcb_check_cr3_cr4(struct vcpu_svm *svm,
+static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu,
				      struct vmcb_save_area *save)
{
-	struct kvm_vcpu *vcpu = &svm->vcpu;
-
	/*
	 * These checks are also performed by KVM_SET_SREGS,
	 * except that EFER.LMA is not checked by SVM against
@@ -271,7 +269,7 @@ static bool nested_vmcb_check_cr3_cr4(struct vcpu_svm *svm,
}

/* Common checks that apply to both L1 and L2 state.  */
-static bool nested_vmcb_valid_sregs(struct vcpu_svm *svm,
+static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu,
				    struct vmcb_save_area *save)
{
	if (CC(!(save->efer & EFER_SVME)))
@@ -284,18 +282,18 @@ static bool nested_vmcb_valid_sregs(struct vcpu_svm *svm,
	if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
		return false;

-	if (!nested_vmcb_check_cr3_cr4(svm, save))
+	if (!nested_vmcb_check_cr3_cr4(vcpu, save))
		return false;

-	if (CC(!kvm_valid_efer(&svm->vcpu, save->efer)))
+	if (CC(!kvm_valid_efer(vcpu, save->efer)))
		return false;

	return true;
}

-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static bool nested_vmcb_checks(struct kvm_vcpu *vcpu, struct vmcb *vmcb12)
{
-	if (!nested_vmcb_valid_sregs(svm, &vmcb12->save))
+	if (!nested_vmcb_valid_sregs(vcpu, &vmcb12->save))
		return false;

	return nested_vmcb_check_controls(&vmcb12->control);
@@ -514,9 +512,10 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
	recalc_intercepts(svm);
}

-int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
+int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
			 struct vmcb *vmcb12)
{
+	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
@@ -550,44 +549,45 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
		return ret;

	if (!npt_enabled)
-		svm->vcpu.arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
+		vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;

	svm_set_gif(svm, true);

	return 0;
}

-int nested_svm_vmrun(struct vcpu_svm *svm)
+int nested_svm_vmrun(struct kvm_vcpu *vcpu)
{
+	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;
	struct vmcb *vmcb12;
	struct kvm_host_map map;
	u64 vmcb12_gpa;

-	++svm->vcpu.stat.nested_run;
+	++vcpu->stat.nested_run;

-	if (is_smm(&svm->vcpu)) {
-		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+	if (is_smm(vcpu)) {
+		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	vmcb12_gpa = svm->vmcb->save.rax;
-	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map);
+	ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
	if (ret == -EINVAL) {
-		kvm_inject_gp(&svm->vcpu, 0);
+		kvm_inject_gp(vcpu, 0);
		return 1;
	} else if (ret) {
-		return kvm_skip_emulated_instruction(&svm->vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
	}

-	ret = kvm_skip_emulated_instruction(&svm->vcpu);
+	ret = kvm_skip_emulated_instruction(vcpu);

	vmcb12 = map.hva;

	if (WARN_ON_ONCE(!svm->nested.initialized))
		return -EINVAL;

-	if (!nested_vmcb_checks(svm, vmcb12)) {
+	if (!nested_vmcb_checks(vcpu, vmcb12)) {
		vmcb12->control.exit_code    = SVM_EXIT_ERR;
		vmcb12->control.exit_code_hi = 0;
		vmcb12->control.exit_info_1  = 0;
@@ -597,25 +597,25 @@ int nested_svm_vmrun(struct vcpu_svm *svm)


	/* Clear internal status */
-	kvm_clear_exception_queue(&svm->vcpu);
-	kvm_clear_interrupt_queue(&svm->vcpu);
+	kvm_clear_exception_queue(vcpu);
+	kvm_clear_interrupt_queue(vcpu);

	/*
	 * Since vmcb01 is not in use, we can use it to store some of the L1
	 * state.
	 */
-	svm->vmcb01.ptr->save.efer   = svm->vcpu.arch.efer;
-	svm->vmcb01.ptr->save.cr0    = kvm_read_cr0(&svm->vcpu);
-	svm->vmcb01.ptr->save.cr4    = svm->vcpu.arch.cr4;
-	svm->vmcb01.ptr->save.rflags = kvm_get_rflags(&svm->vcpu);
-	svm->vmcb01.ptr->save.rip    = kvm_rip_read(&svm->vcpu);
+	svm->vmcb01.ptr->save.efer   = vcpu->arch.efer;
+	svm->vmcb01.ptr->save.cr0    = kvm_read_cr0(vcpu);
+	svm->vmcb01.ptr->save.cr4    = vcpu->arch.cr4;
+	svm->vmcb01.ptr->save.rflags = kvm_get_rflags(vcpu);
+	svm->vmcb01.ptr->save.rip    = kvm_rip_read(vcpu);

	if (!npt_enabled)
-		svm->vmcb01.ptr->save.cr3 = kvm_read_cr3(&svm->vcpu);
+		svm->vmcb01.ptr->save.cr3 = kvm_read_cr3(vcpu);

	svm->nested.nested_run_pending = 1;

-	if (enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12))
+	if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
		goto out_exit_err;

	if (nested_svm_vmrun_msrpm(svm))
@@ -632,7 +632,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
	nested_svm_vmexit(svm);

out:
-	kvm_vcpu_unmap(&svm->vcpu, &map, true);
+	kvm_vcpu_unmap(vcpu, &map, true);

	return ret;
}
@@ -655,26 +655,27 @@ void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)

int nested_svm_vmexit(struct vcpu_svm *svm)
{
-	int rc;
+	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct vmcb *vmcb12;
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_host_map map;
+	int rc;

-	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
+	rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
	if (rc) {
		if (rc == -EINVAL)
-			kvm_inject_gp(&svm->vcpu, 0);
+			kvm_inject_gp(vcpu, 0);
		return 1;
	}

	vmcb12 = map.hva;

	/* Exit Guest-Mode */
-	leave_guest_mode(&svm->vcpu);
+	leave_guest_mode(vcpu);
	svm->nested.vmcb12_gpa = 0;
	WARN_ON_ONCE(svm->nested.nested_run_pending);

-	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
+	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);

	/* in case we halted in L2 */
	svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -688,14 +689,14 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
	vmcb12->save.gdtr   = vmcb->save.gdtr;
	vmcb12->save.idtr   = vmcb->save.idtr;
	vmcb12->save.efer   = svm->vcpu.arch.efer;
-	vmcb12->save.cr0    = kvm_read_cr0(&svm->vcpu);
-	vmcb12->save.cr3    = kvm_read_cr3(&svm->vcpu);
+	vmcb12->save.cr0    = kvm_read_cr0(vcpu);
+	vmcb12->save.cr3    = kvm_read_cr3(vcpu);
	vmcb12->save.cr2    = vmcb->save.cr2;
	vmcb12->save.cr4    = svm->vcpu.arch.cr4;
-	vmcb12->save.rflags = kvm_get_rflags(&svm->vcpu);
-	vmcb12->save.rip    = kvm_rip_read(&svm->vcpu);
-	vmcb12->save.rsp    = kvm_rsp_read(&svm->vcpu);
-	vmcb12->save.rax    = kvm_rax_read(&svm->vcpu);
+	vmcb12->save.rflags = kvm_get_rflags(vcpu);
+	vmcb12->save.rip    = kvm_rip_read(vcpu);
+	vmcb12->save.rsp    = kvm_rsp_read(vcpu);
+	vmcb12->save.rax    = kvm_rax_read(vcpu);
	vmcb12->save.dr7    = vmcb->save.dr7;
	vmcb12->save.dr6    = svm->vcpu.arch.dr6;
	vmcb12->save.cpl    = vmcb->save.cpl;
@@ -744,13 +745,13 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
	/*
	 * Restore processor state that had been saved in vmcb01
	 */
-	kvm_set_rflags(&svm->vcpu, svm->vmcb->save.rflags);
-	svm_set_efer(&svm->vcpu, svm->vmcb->save.efer);
-	svm_set_cr0(&svm->vcpu, svm->vmcb->save.cr0 | X86_CR0_PE);
-	svm_set_cr4(&svm->vcpu, svm->vmcb->save.cr4);
-	kvm_rax_write(&svm->vcpu, svm->vmcb->save.rax);
-	kvm_rsp_write(&svm->vcpu, svm->vmcb->save.rsp);
-	kvm_rip_write(&svm->vcpu, svm->vmcb->save.rip);
+	kvm_set_rflags(vcpu, svm->vmcb->save.rflags);
+	svm_set_efer(vcpu, svm->vmcb->save.efer);
+	svm_set_cr0(vcpu, svm->vmcb->save.cr0 | X86_CR0_PE);
+	svm_set_cr4(vcpu, svm->vmcb->save.cr4);
+	kvm_rax_write(vcpu, svm->vmcb->save.rax);
+	kvm_rsp_write(vcpu, svm->vmcb->save.rsp);
+	kvm_rip_write(vcpu, svm->vmcb->save.rip);

	svm->vcpu.arch.dr7 = DR7_FIXED_1;
	kvm_update_dr7(&svm->vcpu);
@@ -762,11 +763,11 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
				       vmcb12->control.exit_int_info_err,
				       KVM_ISA_SVM);

-	kvm_vcpu_unmap(&svm->vcpu, &map, true);
+	kvm_vcpu_unmap(vcpu, &map, true);

-	nested_svm_uninit_mmu_context(&svm->vcpu);
+	nested_svm_uninit_mmu_context(vcpu);

-	rc = nested_svm_load_cr3(&svm->vcpu, svm->vmcb->save.cr3, false);
+	rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false);
	if (rc)
		return 1;

@@ -775,8 +776,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
	 * doesn't end up in L1.
	 */
	svm->vcpu.arch.nmi_injected = false;
-	kvm_clear_exception_queue(&svm->vcpu);
-	kvm_clear_interrupt_queue(&svm->vcpu);
+	kvm_clear_exception_queue(vcpu);
+	kvm_clear_interrupt_queue(vcpu);

	return 0;
}
@@ -826,17 +827,19 @@ void svm_free_nested(struct vcpu_svm *svm)
 */
void svm_leave_nested(struct vcpu_svm *svm)
{
-	if (is_guest_mode(&svm->vcpu)) {
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+
+	if (is_guest_mode(vcpu)) {
		svm->nested.nested_run_pending = 0;
-		leave_guest_mode(&svm->vcpu);
+		leave_guest_mode(vcpu);

		svm_switch_vmcb(svm, &svm->nested.vmcb02);

-		nested_svm_uninit_mmu_context(&svm->vcpu);
+		nested_svm_uninit_mmu_context(vcpu);
		vmcb_mark_all_dirty(svm->vmcb);
	}

-	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
+	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
}

static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
@@ -945,16 +948,15 @@ int nested_svm_exit_handled(struct vcpu_svm *svm)
	return vmexit;
}

-int nested_svm_check_permissions(struct vcpu_svm *svm)
+int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
{
-	if (!(svm->vcpu.arch.efer & EFER_SVME) ||
-	    !is_paging(&svm->vcpu)) {
-		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+	if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
+		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

-	if (svm->vmcb->save.cpl) {
-		kvm_inject_gp(&svm->vcpu, 0);
+	if (to_svm(vcpu)->vmcb->save.cpl) {
+		kvm_inject_gp(vcpu, 0);
		return 1;
	}

@@ -1265,7 +1267,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
	if (!(save->cr0 & X86_CR0_PG) ||
	    !(save->cr0 & X86_CR0_PE) ||
	    (save->rflags & X86_EFLAGS_VM) ||
-	    !nested_vmcb_valid_sregs(svm, save))
+	    !nested_vmcb_valid_sregs(vcpu, save))
		goto out_free;

	/*
arch/x86/kvm/svm/sev.c +14 −13
@@ -1849,7 +1849,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
		vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
		vcpu->arch.regs[VCPU_REGS_RCX] = 0;

-		ret = svm_invoke_exit_handler(svm, SVM_EXIT_CPUID);
+		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
		if (!ret) {
			ret = -EINVAL;
			break;
@@ -1899,8 +1899,9 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
	return ret;
}

-int sev_handle_vmgexit(struct vcpu_svm *svm)
+int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
{
+	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	u64 ghcb_gpa, exit_code;
	struct ghcb *ghcb;
@@ -1912,13 +1913,13 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
		return sev_handle_vmgexit_msr_protocol(svm);

	if (!ghcb_gpa) {
		vcpu_unimpl(&svm->vcpu, "vmgexit: GHCB gpa is not set\n");
		vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
		return -EINVAL;
	}

-	if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
+	if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
		/* Unable to map GHCB from guest */
		vcpu_unimpl(&svm->vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
		vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
			    ghcb_gpa);
		return -EINVAL;
	}
@@ -1926,7 +1927,7 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
	svm->ghcb = svm->ghcb_map.hva;
	ghcb = svm->ghcb_map.hva;

-	trace_kvm_vmgexit_enter(svm->vcpu.vcpu_id, ghcb);
+	trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);

	exit_code = ghcb_get_sw_exit_code(ghcb);

@@ -1944,7 +1945,7 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
		if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
			break;

-		ret = kvm_sev_es_mmio_read(&svm->vcpu,
+		ret = kvm_sev_es_mmio_read(vcpu,
					   control->exit_info_1,
					   control->exit_info_2,
					   svm->ghcb_sa);
@@ -1953,19 +1954,19 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
		if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
			break;

-		ret = kvm_sev_es_mmio_write(&svm->vcpu,
+		ret = kvm_sev_es_mmio_write(vcpu,
					    control->exit_info_1,
					    control->exit_info_2,
					    svm->ghcb_sa);
		break;
	case SVM_VMGEXIT_NMI_COMPLETE:
-		ret = svm_invoke_exit_handler(svm, SVM_EXIT_IRET);
+		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
		break;
	case SVM_VMGEXIT_AP_HLT_LOOP:
-		ret = kvm_emulate_ap_reset_hold(&svm->vcpu);
+		ret = kvm_emulate_ap_reset_hold(vcpu);
		break;
	case SVM_VMGEXIT_AP_JUMP_TABLE: {
-		struct kvm_sev_info *sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
+		struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;

		switch (control->exit_info_1) {
		case 0:
@@ -1990,12 +1991,12 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
		break;
	}
	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
-		vcpu_unimpl(&svm->vcpu,
+		vcpu_unimpl(vcpu,
			    "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
			    control->exit_info_1, control->exit_info_2);
		break;
	default:
-		ret = svm_invoke_exit_handler(svm, exit_code);
+		ret = svm_invoke_exit_handler(vcpu, exit_code);
	}

	return ret;
arch/x86/kvm/svm/svm.c +287 −278 (diff collapsed: preview size limit exceeded)
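
Since the svm.c diff is collapsed above, here is a hedged sketch of its
central change, as implied by the svm_invoke_exit_handler() prototype
change in svm.h below: the svm_exit_handlers function-pointer table
switches from taking struct vcpu_svm * to struct kvm_vcpu *
(illustrative excerpt, not the actual collapsed hunk):

-static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
+static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
	...
	[SVM_EXIT_VMRUN]	= nested_svm_vmrun,
	[SVM_EXIT_VMGEXIT]	= sev_handle_vmgexit,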

arch/x86/kvm/svm/svm.h +7 −7
@@ -405,7 +405,7 @@ bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
-int svm_invoke_exit_handler(struct vcpu_svm *svm, u64 exit_code);
+int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);

@@ -437,15 +437,15 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

-int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, struct vmcb *vmcb12);
+int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
-int nested_svm_vmrun(struct vcpu_svm *svm);
+int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);
int nested_svm_exit_handled(struct vcpu_svm *svm);
-int nested_svm_check_permissions(struct vcpu_svm *svm);
+int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
@@ -492,8 +492,8 @@ void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate);
-int avic_incomplete_ipi_interception(struct vcpu_svm *svm);
-int avic_unaccelerated_access_interception(struct vcpu_svm *svm);
+int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
+int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
@@ -566,7 +566,7 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
-int sev_handle_vmgexit(struct vcpu_svm *svm);
+int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);