Commit baff59cc authored by Vitaly Kuznetsov, committed by Paolo Bonzini
Browse files

KVM: Pre-allocate cpumasks for kvm_make_all_cpus_request_except()



Allocating cpumask dynamically in zalloc_cpumask_var() is not ideal.
Allocation is somewhat slow and can (in theory and when CPUMASK_OFFSTACK)
fail. kvm_make_all_cpus_request_except() already disables preemption so
we can use pre-allocated per-cpu cpumasks instead.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20210903075141.403071-8-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 381cecc5
Loading
Loading
Loading
Loading
+23 −6
Original line number Original line Diff line number Diff line
@@ -155,6 +155,8 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;
static unsigned long long kvm_active_vms;


static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);

__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
						   unsigned long start, unsigned long end)
						   unsigned long start, unsigned long end)
{
{
@@ -313,14 +315,15 @@ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except)
				      struct kvm_vcpu *except)
{
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu *vcpu;
	cpumask_var_t cpus;
	struct cpumask *cpus;
	bool called;
	bool called;
	int i, me;
	int i, me;


	zalloc_cpumask_var(&cpus, GFP_ATOMIC);

	me = get_cpu();
	me = get_cpu();


	cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
	cpumask_clear(cpus);

	kvm_for_each_vcpu(i, vcpu, kvm) {
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu == except)
		if (vcpu == except)
			continue;
			continue;
@@ -330,7 +333,6 @@ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
	called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
	put_cpu();
	put_cpu();


	free_cpumask_var(cpus);
	return called;
	return called;
}
}


@@ -5560,9 +5562,17 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		goto out_free_3;
		goto out_free_3;
	}
	}


	for_each_possible_cpu(cpu) {
		if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
					    GFP_KERNEL, cpu_to_node(cpu))) {
			r = -ENOMEM;
			goto out_free_4;
		}
	}

	r = kvm_async_pf_init();
	r = kvm_async_pf_init();
	if (r)
	if (r)
		goto out_free;
		goto out_free_5;


	kvm_chardev_ops.owner = module;
	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vm_fops.owner = module;
@@ -5588,7 +5598,10 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,


out_unreg:
out_unreg:
	kvm_async_pf_deinit();
	kvm_async_pf_deinit();
out_free:
out_free_5:
	for_each_possible_cpu(cpu)
		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
out_free_4:
	kmem_cache_destroy(kvm_vcpu_cache);
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_3:
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_reboot_notifier(&kvm_reboot_notifier);
@@ -5608,8 +5621,12 @@ EXPORT_SYMBOL_GPL(kvm_init);


void kvm_exit(void)
void kvm_exit(void)
{
{
	int cpu;

	debugfs_remove_recursive(kvm_debugfs_dir);
	debugfs_remove_recursive(kvm_debugfs_dir);
	misc_deregister(&kvm_dev);
	misc_deregister(&kvm_dev);
	for_each_possible_cpu(cpu)
		free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
	kmem_cache_destroy(kvm_vcpu_cache);
	kmem_cache_destroy(kvm_vcpu_cache);
	kvm_async_pf_deinit();
	kvm_async_pf_deinit();
	unregister_syscore_ops(&kvm_syscore_ops);
	unregister_syscore_ops(&kvm_syscore_ops);