Commit 2b519b57 authored by Wanpeng Li, committed by Paolo Bonzini

x86/kvm: Don't bother __pv_cpu_mask when !CONFIG_SMP



Enabling PV TLB shootdown when !CONFIG_SMP doesn't make sense. Let's
move it inside CONFIG_SMP. In addition, we can avoid defining and
allocating __pv_cpu_mask when !CONFIG_SMP and get rid of the 'alloc'
variable in kvm_alloc_cpumask.

Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Message-Id: <1617941911-5338-1-git-send-email-wanpengli@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 4c6654bd
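
In short, after this patch the __pv_cpu_mask machinery ends up laid out roughly as in the condensed sketch below (this is only an orientation aid distilled from the full diff that follows, not the literal patch): the per-CPU mask, kvm_flush_tlb_others() and kvm_alloc_cpumask() all sit under a single #ifdef CONFIG_SMP, and because both users of the mask are SMP-only, the allocation check collapses to one condition, which is what makes the old 'alloc' flag unnecessary.

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);

/* ... pv_tlb_flush_supported(), pv_ipi_supported(), kvm_flush_tlb_others() ... */

static __init int kvm_alloc_cpumask(void)
{
	int cpu;

	if (!kvm_para_available() || nopv)
		return 0;

	/* Both users of __pv_cpu_mask are SMP-only, so no 'alloc' flag is needed. */
	if (pv_tlb_flush_supported() || pv_ipi_supported())
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}

	return 0;
}
arch_initcall(kvm_alloc_cpumask);

#endif /* CONFIG_SMP */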
arch/x86/kernel/kvm.c: +55 −63
@@ -451,6 +451,10 @@ static void __init sev_map_percpu_data(void)
 	}
 }
 
+#ifdef CONFIG_SMP
+
+static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
+
 static bool pv_tlb_flush_supported(void)
 {
 	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
@@ -458,10 +462,6 @@ static bool pv_tlb_flush_supported(void)
 		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
 }
 
-static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
-
-#ifdef CONFIG_SMP
-
 static bool pv_ipi_supported(void)
 {
 	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
@@ -574,6 +574,49 @@ static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
 	}
 }
 
+static void kvm_flush_tlb_others(const struct cpumask *cpumask,
+			const struct flush_tlb_info *info)
+{
+	u8 state;
+	int cpu;
+	struct kvm_steal_time *src;
+	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
+
+	cpumask_copy(flushmask, cpumask);
+	/*
+	 * We have to call flush only on online vCPUs. And
+	 * queue flush_on_enter for pre-empted vCPUs
+	 */
+	for_each_cpu(cpu, flushmask) {
+		src = &per_cpu(steal_time, cpu);
+		state = READ_ONCE(src->preempted);
+		if ((state & KVM_VCPU_PREEMPTED)) {
+			if (try_cmpxchg(&src->preempted, &state,
+					state | KVM_VCPU_FLUSH_TLB))
+				__cpumask_clear_cpu(cpu, flushmask);
+		}
+	}
+
+	native_flush_tlb_others(flushmask, info);
+}
+
+static __init int kvm_alloc_cpumask(void)
+{
+	int cpu;
+
+	if (!kvm_para_available() || nopv)
+		return 0;
+
+	if (pv_tlb_flush_supported() || pv_ipi_supported())
+		for_each_possible_cpu(cpu) {
+			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
+				GFP_KERNEL, cpu_to_node(cpu));
+		}
+
+	return 0;
+}
+arch_initcall(kvm_alloc_cpumask);
+
 static void __init kvm_smp_prepare_boot_cpu(void)
 {
 	/*
@@ -611,33 +654,8 @@ static int kvm_cpu_down_prepare(unsigned int cpu)
 	local_irq_enable();
 	return 0;
 }
-#endif
 
-static void kvm_flush_tlb_others(const struct cpumask *cpumask,
-			const struct flush_tlb_info *info)
-{
-	u8 state;
-	int cpu;
-	struct kvm_steal_time *src;
-	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
-
-	cpumask_copy(flushmask, cpumask);
-	/*
-	 * We have to call flush only on online vCPUs. And
-	 * queue flush_on_enter for pre-empted vCPUs
-	 */
-	for_each_cpu(cpu, flushmask) {
-		src = &per_cpu(steal_time, cpu);
-		state = READ_ONCE(src->preempted);
-		if ((state & KVM_VCPU_PREEMPTED)) {
-			if (try_cmpxchg(&src->preempted, &state,
-					state | KVM_VCPU_FLUSH_TLB))
-				__cpumask_clear_cpu(cpu, flushmask);
-		}
-	}
-
-	native_flush_tlb_others(flushmask, info);
-}
+#endif
 
 static void __init kvm_guest_init(void)
 {
@@ -653,12 +671,6 @@ static void __init kvm_guest_init(void)
 		pv_ops.time.steal_clock = kvm_steal_clock;
 	}
 
-	if (pv_tlb_flush_supported()) {
-		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
-		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
-		pr_info("KVM setup pv remote TLB flush\n");
-	}
-
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
 		apic_set_eoi_write(kvm_guest_apic_eoi_write);
 
@@ -668,6 +680,12 @@ static void __init kvm_guest_init(void)
 	}
 
 #ifdef CONFIG_SMP
+	if (pv_tlb_flush_supported()) {
+		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
+		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
+		pr_info("KVM setup pv remote TLB flush\n");
+	}
+
 	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
 	if (pv_sched_yield_supported()) {
 		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
@@ -734,7 +752,7 @@ static uint32_t __init kvm_detect(void)

 static void __init kvm_apic_init(void)
 {
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 	if (pv_ipi_supported())
 		kvm_setup_pv_ipi();
 #endif
@@ -794,32 +812,6 @@ static __init int activate_jump_labels(void)
 }
 arch_initcall(activate_jump_labels);
 
-static __init int kvm_alloc_cpumask(void)
-{
-	int cpu;
-	bool alloc = false;
-
-	if (!kvm_para_available() || nopv)
-		return 0;
-
-	if (pv_tlb_flush_supported())
-		alloc = true;
-
-#if defined(CONFIG_SMP)
-	if (pv_ipi_supported())
-		alloc = true;
-#endif
-
-	if (alloc)
-		for_each_possible_cpu(cpu) {
-			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
-				GFP_KERNEL, cpu_to_node(cpu));
-		}
-
-	return 0;
-}
-arch_initcall(kvm_alloc_cpumask);
-
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 
 /* Kick a cpu by its apicid. Used to wake up a halted vcpu */