Commit d89d04ab authored by Paolo Bonzini

KVM: move EXIT_FASTPATH_REENTER_GUEST to common code



Now that KVM is using static calls, calling vmx_vcpu_run and
vmx_sync_pir_to_irr no longer incurs the cost of a retpoline.

Therefore there is no longer any need to handle
EXIT_FASTPATH_REENTER_GUEST in vendor code.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent fb18d053
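As background, here is a minimal sketch of the static-call mechanism from
<linux/static_call.h> that the commit message relies on. The names my_run,
my_vendor_run and caller are hypothetical, used only for illustration; KVM
itself wires its kvm_x86_* static calls (such as kvm_x86_run below) to the
kvm_x86_ops members through macros rather than open-coding them like this:

	#include <linux/static_call.h>

	/* Hypothetical vendor callback standing in for e.g. vmx_vcpu_run(). */
	static int my_vendor_run(int cpu)
	{
		return cpu;
	}

	/* Key + trampoline for the "my_run" call site, initially my_vendor_run. */
	DEFINE_STATIC_CALL(my_run, my_vendor_run);

	static int caller(int cpu)
	{
		/*
		 * Compiles to a direct call into a patchable trampoline, so no
		 * indirect branch is emitted and no retpoline is needed.  The
		 * target can be repointed later with static_call_update().
		 */
		return static_call(my_run)(cpu);
	}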
arch/x86/kvm/vmx/vmx.c  +1 −18

@@ -6711,11 +6711,9 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 
 static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
-	fastpath_t exit_fastpath;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	unsigned long cr3, cr4;
 
-reenter_guest:
 	/* Record the guest's net vcpu time for enforced NMI injections. */
 	if (unlikely(!enable_vnmi &&
 		     vmx->loaded_vmcs->soft_vnmi_blocked))
@@ -6865,22 +6863,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (is_guest_mode(vcpu))
 		return EXIT_FASTPATH_NONE;
 
-	exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
-	if (exit_fastpath == EXIT_FASTPATH_REENTER_GUEST) {
-		if (!kvm_vcpu_exit_request(vcpu)) {
-			/*
-			 * FIXME: this goto should be a loop in vcpu_enter_guest,
-			 * but it would incur the cost of a retpoline for now.
-			 * Revisit once static calls are available.
-			 */
-			if (vcpu->arch.apicv_active)
-				vmx_sync_pir_to_irr(vcpu);
-			goto reenter_guest;
-		}
-		exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
-	}
-
-	return exit_fastpath;
+	return vmx_exit_handlers_fastpath(vcpu);
 }
 
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
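For reference, the fastpath_t completion codes appearing in these hunks are
defined in arch/x86/include/asm/kvm_host.h, which at this point in the tree
reads roughly:

	enum exit_fastpath_completion {
		EXIT_FASTPATH_NONE,
		EXIT_FASTPATH_REENTER_GUEST,
		EXIT_FASTPATH_EXIT_HANDLED,
	};
	typedef enum exit_fastpath_completion fastpath_t;

EXIT_FASTPATH_REENTER_GUEST asks the caller to re-enter the guest right away;
after this commit, that re-entry is performed by the loop added to
vcpu_enter_guest() below rather than by each vendor's run callback.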
arch/x86/kvm/x86.c  +14 −3

@@ -1796,12 +1796,11 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
 
-bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
 {
 	return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
 		xfer_to_guest_mode_work_pending();
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_exit_request);
 
 /*
  * The fast path for frequent and performance sensitive wrmsr emulation,
@@ -9044,7 +9043,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
 	}
 
-	exit_fastpath = static_call(kvm_x86_run)(vcpu);
+	for (;;) {
+		exit_fastpath = static_call(kvm_x86_run)(vcpu);
+		if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
+			break;
+
+		if (unlikely(kvm_vcpu_exit_request(vcpu))) {
+			exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
+			break;
+		}
+
+		if (vcpu->arch.apicv_active)
+			static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+	}
 
 	/*
 	 * Do this here before restoring debug registers on the host.  And
arch/x86/kvm/x86.h  +0 −1

@@ -395,7 +395,6 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 int kvm_spec_ctrl_test_value(u64 value);
 bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
-bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
 			      struct x86_exception *e);
 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);