Commit b3646477 authored by Jason Baron, committed by Paolo Bonzini

KVM: x86: use static calls to reduce kvm_x86_ops overhead



Convert kvm_x86_ops to use static calls. Note that all kvm_x86_ops are
covered here except for 'pmu_ops' and 'nested_ops'.
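
As a rough sketch of the mechanism (illustrative only; the names below are
simplified stand-ins, not the exact macros introduced by this patch), every
kvm_x86_ops member gets a static-call key named kvm_x86_<op>, defined NULL
at init and pointed at the vendor (VMX or SVM) implementation once
kvm_x86_ops is known; call sites then go through static_call() or
static_call_cond() instead of a retpolined indirect call:

	/* Sketch only -- simplified from the real per-op macro expansion. */
	#include <linux/static_call.h>

	/* One key per op, typed after the matching kvm_x86_ops member. */
	DEFINE_STATIC_CALL_NULL(kvm_x86_get_cpl,
				*(((struct kvm_x86_ops *)0)->get_cpl));

	/* Done once at hardware setup, after kvm_x86_ops is filled in. */
	static void kvm_ops_static_call_update_example(void)
	{
		static_call_update(kvm_x86_get_cpl, kvm_x86_ops.get_cpl);
	}

	/* Call sites become direct, patchable calls: */
	static int example_get_cpl(struct kvm_vcpu *vcpu)
	{
		return static_call(kvm_x86_get_cpl)(vcpu);
	}

With inline static calls (x86_64), static_call(kvm_x86_get_cpl)(vcpu)
compiles down to a direct call that is patched at runtime, which is where
the win over the mitigated indirect call comes from.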

Here are some numbers from running cpuid in a loop of 1 million calls,
averaged over 5 runs and measured in the VM (lower is better).

Intel Xeon 3000MHz:

           |default    |mitigations=off
-------------------------------------
vanilla    |.671s      |.486s
static call|.573s(-15%)|.458s(-6%)

AMD EPYC 2500MHz:

           |default    |mitigations=off
-------------------------------------
vanilla    |.710s      |.609s
static call|.664s(-6%) |.609s(0%)

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Signed-off-by: Jason Baron <jbaron@akamai.com>
Message-Id: <e057bf1b8a7ad15652df6eeba3f907ae758d3399.1610680941.git.jbaron@akamai.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9af5471b
arch/x86/include/asm/kvm_host.h +3 −5
@@ -1374,7 +1374,7 @@ void kvm_arch_free_vm(struct kvm *kvm);
 static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
 {
 	if (kvm_x86_ops.tlb_remote_flush &&
-	    !kvm_x86_ops.tlb_remote_flush(kvm))
+	    !static_call(kvm_x86_tlb_remote_flush)(kvm))
 		return 0;
 	else
 		return -ENOTSUPP;
@@ -1767,14 +1767,12 @@ static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)

 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
-	if (kvm_x86_ops.vcpu_blocking)
-		kvm_x86_ops.vcpu_blocking(vcpu);
+	static_call_cond(kvm_x86_vcpu_blocking)(vcpu);
 }

 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
-	if (kvm_x86_ops.vcpu_unblocking)
-		kvm_x86_ops.vcpu_unblocking(vcpu);
+	static_call_cond(kvm_x86_vcpu_unblocking)(vcpu);
 }

 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
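
The two hunks just above switch to static_call_cond() rather than plain
static_call(): vcpu_blocking/vcpu_unblocking are optional ops, so (assuming
the key is defined with DEFINE_STATIC_CALL_NULL, as sketched below with
stand-in names) static_call_cond() simply becomes a no-op when no function
has ever been installed, which is why the explicit NULL checks can go away:

	/* Sketch: an optional op the vendor module may never install. */
	DEFINE_STATIC_CALL_NULL(kvm_x86_vcpu_blocking,
				*(((struct kvm_x86_ops *)0)->vcpu_blocking));

	static void example_blocking(struct kvm_vcpu *vcpu)
	{
		/* Patched to a NOP if the op was never static_call_update()d. */
		static_call_cond(kvm_x86_vcpu_blocking)(vcpu);
	}

Note that static_call_cond() is only valid for void-returning ops, which is
why tlb_remote_flush in the first hunk keeps its explicit NULL check and
uses plain static_call().
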
arch/x86/kvm/cpuid.c +1 −1
@@ -182,7 +182,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);

 	/* Invoke the vendor callback only after the above state is updated. */
-	kvm_x86_ops.vcpu_after_set_cpuid(vcpu);
+	static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);
 }

 static int is_efer_nx(void)
arch/x86/kvm/hyperv.c +2 −2
@@ -1154,7 +1154,7 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 		addr = gfn_to_hva(kvm, gfn);
 		if (kvm_is_error_hva(addr))
 			return 1;
-		kvm_x86_ops.patch_hypercall(vcpu, instructions);
+		static_call(kvm_x86_patch_hypercall)(vcpu, instructions);
 		((unsigned char *)instructions)[3] = 0xc3; /* ret */
 		if (__copy_to_user((void __user *)addr, instructions, 4))
 			return 1;
@@ -1745,7 +1745,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 	 * hypercall generates UD from non zero cpl and real mode
 	 * per HYPER-V spec
 	 */
-	if (kvm_x86_ops.get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
+	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
 	}
arch/x86/kvm/irq.c +1 −2
@@ -143,8 +143,7 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
 {
 	__kvm_migrate_apic_timer(vcpu);
 	__kvm_migrate_pit_timer(vcpu);
-	if (kvm_x86_ops.migrate_timers)
-		kvm_x86_ops.migrate_timers(vcpu);
+	static_call_cond(kvm_x86_migrate_timers)(vcpu);
 }

 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
arch/x86/kvm/kvm_cache_regs.h +5 −5
@@ -68,7 +68,7 @@ static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
 		return 0;

 	if (!kvm_register_is_available(vcpu, reg))
-		kvm_x86_ops.cache_reg(vcpu, reg);
+		static_call(kvm_x86_cache_reg)(vcpu, reg);

 	return vcpu->arch.regs[reg];
 }
@@ -108,7 +108,7 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
 	might_sleep();  /* on svm */

 	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
-		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_PDPTR);
+		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_PDPTR);

 	return vcpu->arch.walk_mmu->pdptrs[index];
 }
@@ -118,7 +118,7 @@ static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
 	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
 	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
 	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
-		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR0);
+		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR0);
 	return vcpu->arch.cr0 & mask;
 }

@@ -132,14 +132,14 @@ static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
 	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
 	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
 	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
-		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR4);
+		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR4);
 	return vcpu->arch.cr4 & mask;
 }

 static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
 {
 	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
-		kvm_x86_ops.cache_reg(vcpu, VCPU_EXREG_CR3);
+		static_call(kvm_x86_cache_reg)(vcpu, VCPU_EXREG_CR3);
 	return vcpu->arch.cr3;
 }
