Commit f0a92b35 authored by Quan Zhou, committed by Zheng Zengkai
Browse files

KVM: arm64: Add kvm_arch::dvm_cpumask and dvm_lock

virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I62Q2L


CVE: NA

----------------------------------------------------

Introduce dvm_cpumask and dvm_lock in struct kvm_arch. dvm_cpumask will
store the union of all vcpus' cpus_ptr and will be used for the TLBI
broadcast range. dvm_lock ensures exclusive manipulation of dvm_cpumask.

In vcpu_load, we should decide whether to perform the subsequent update
operation by checking whether dvm_cpumask has changed.

Signed-off-by: Quan Zhou <zhouquan65@huawei.com>
Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
Reviewed-by: Nianyao Tang <tangnianyao@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parent 8d396309
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -121,6 +121,11 @@ struct kvm_arch {
	unsigned int pmuver;

	u8 pfr0_csv2;

#ifdef CONFIG_KVM_HISI_VIRT
	spinlock_t dvm_lock;
	cpumask_t *dvm_cpumask;	/* Union of all vcpu's cpus_ptr */
#endif
};

struct kvm_vcpu_fault_info {
+10 −0
Original line number Diff line number Diff line
@@ -143,6 +143,12 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret;

#ifdef CONFIG_KVM_HISI_VIRT
	ret = kvm_hisi_init_dvmbm(kvm);
	if (ret)
		return ret;
#endif

	ret = kvm_arm_setup_stage2(kvm, type);
	if (ret)
		return ret;
@@ -182,6 +188,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

#ifdef CONFIG_KVM_HISI_VIRT
	kvm_hisi_destroy_dvmbm(kvm);
#endif

	bitmap_free(kvm->arch.pmu_filter);

	kvm_vgic_destroy(kvm);
+53 −0
Original line number Diff line number Diff line
@@ -198,10 +198,42 @@ void kvm_hisi_dvmbm_vcpu_destroy(struct kvm_vcpu *vcpu)

void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *iter;
	cpumask_t union_mask;
	int idx;

	/* Nothing to do unless the hardware supports DVMBM */
	if (!kvm_dvmbm_support)
		return;

	/* Snapshot the current task's affinity into this vcpu */
	cpumask_copy(vcpu->arch.cpus_ptr, current->cpus_ptr);

	/* Fast path: affinity unchanged since the last put, skip rebuild */
	if (likely(cpumask_equal(vcpu->arch.cpus_ptr,
				 vcpu->arch.pre_cpus_ptr)))
		return;

	/* Slow path: rebuild the VM-wide union under dvm_lock */
	spin_lock(&kvm->arch.dvm_lock);

	cpumask_clear(&union_mask);
	kvm_for_each_vcpu(idx, iter, kvm) {
		/*
		 * We may get a stale cpus_ptr if another thread is
		 * concurrently changing its affinity. It'll eventually
		 * go through vcpu_load() and we rely on the last
		 * dvm_lock holder to make things correct.
		 */
		cpumask_or(&union_mask, &union_mask, iter->arch.cpus_ptr);
	}

	/* Only rewrite dvm_cpumask when the union actually changed */
	if (!cpumask_equal(kvm->arch.dvm_cpumask, &union_mask))
		cpumask_copy(kvm->arch.dvm_cpumask, &union_mask);

	spin_unlock(&kvm->arch.dvm_lock);
}

void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu)
@@ -211,3 +243,24 @@ void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu)

	cpumask_copy(vcpu->arch.pre_cpus_ptr, vcpu->arch.cpus_ptr);
}

int kvm_hisi_init_dvmbm(struct kvm *kvm)
{
	if (!kvm_dvmbm_support)
		return 0;

	spin_lock_init(&kvm->arch.dvm_lock);
	kvm->arch.dvm_cpumask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC);
	if (!kvm->arch.dvm_cpumask)
		return -ENOMEM;

	return 0;
}

void kvm_hisi_destroy_dvmbm(struct kvm *kvm)
{
	if (!kvm_dvmbm_support)
		return;

	kfree(kvm->arch.dvm_cpumask);
}
+2 −0
Original line number Diff line number Diff line
@@ -27,5 +27,7 @@ int kvm_hisi_dvmbm_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_hisi_dvmbm_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu);
void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu);
int kvm_hisi_init_dvmbm(struct kvm *kvm);
void kvm_hisi_destroy_dvmbm(struct kvm *kvm);

#endif /* __HISI_VIRT_H__ */