Commit 1489b74d authored by Quan Zhou's avatar Quan Zhou Committed by Zheng Zengkai
Browse files

KVM: arm64: Implement the capability of DVMBM

virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I62Q2L


CVE: NA

----------------------------------------------------

Implement the capability of DVMBM. Each time a vcpu is loaded, we
re-calculate the VM-wide dvm_cpumask. If it has changed, we kick all
other vcpus out to reload the latest LSUDVMBM value into the register;
a new request, KVM_REQ_RELOAD_DVMBM, is added to implement this.

Otherwise, if the dvm_cpumask was not changed by this vcpu, we simply
reload the current LSUDVMBM value into the register to ensure its
contents remain correct; nothing else needs to be done.

Signed-off-by: default avatarQuan Zhou <zhouquan65@huawei.com>
Reviewed-by: default avatarZenghui Yu <yuzenghui@huawei.com>
Reviewed-by: default avatarNianyao Tang <tangnianyao@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parent f0a92b35
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -47,6 +47,7 @@
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
#define KVM_REQ_RELOAD_DVMBM	KVM_ARCH_REQ(6)

#define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				     KVM_DIRTY_LOG_INITIALLY_SET)
@@ -125,6 +126,7 @@ struct kvm_arch {
#ifdef CONFIG_KVM_HISI_VIRT
	spinlock_t dvm_lock;
	cpumask_t *dvm_cpumask;	/* Union of all vcpu's cpus_ptr */
	u64 lsudvmbm_el2;
#endif
};

+5 −0
Original line number Diff line number Diff line
@@ -752,6 +752,11 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
			kvm_pmu_handle_pmcr(vcpu,
					    __vcpu_sys_reg(vcpu, PMCR_EL0));

#ifdef CONFIG_KVM_HISI_VIRT
		if (kvm_check_request(KVM_REQ_RELOAD_DVMBM, vcpu))
			kvm_hisi_reload_lsudvmbm(vcpu->kvm);
#endif
	}
}

+124 −1
Original line number Diff line number Diff line
@@ -196,6 +196,97 @@ void kvm_hisi_dvmbm_vcpu_destroy(struct kvm_vcpu *vcpu)
	kfree(vcpu->arch.pre_cpus_ptr);
}

/*
 * Program SYS_LSUDVMBM_EL2 on the current physical CPU with the VM-wide
 * value cached in kvm->arch.lsudvmbm_el2.
 *
 * Lockless: callers are expected to serialize against updates of
 * lsudvmbm_el2 themselves (e.g. by holding dvm_lock, as the callers in
 * this file do) and to prevent migration across the write.
 */
static void __kvm_write_lsudvmbm(struct kvm *kvm)
{
	write_sysreg_s(kvm->arch.lsudvmbm_el2, SYS_LSUDVMBM_EL2);
}

/*
 * Locked wrapper around __kvm_write_lsudvmbm().
 *
 * NOTE(review): the original author questioned whether dvm_lock is
 * strictly needed here. Holding it does serialize this sysreg write
 * against a concurrent update of kvm->arch.lsudvmbm_el2 under the same
 * lock (see kvm_hisi_dvmbm_load()), so keep it until proven redundant.
 */
static void kvm_write_lsudvmbm(struct kvm *kvm)
{
	spin_lock(&kvm->arch.dvm_lock);
	__kvm_write_lsudvmbm(kvm);
	spin_unlock(&kvm->arch.dvm_lock);
}

/*
 * Collect the distinct Aff3 (die) values spanned by the VM's
 * dvm_cpumask into vm_aff3s[], recording at most @size entries.
 *
 * Returns the number of distinct dies recorded.
 */
static int kvm_dvmbm_get_dies_info(struct kvm *kvm, u64 *vm_aff3s, int size)
{
	int nr_dies = 0;
	int cpu;

	for_each_cpu(cpu, kvm->arch.dvm_cpumask) {
		u64 aff3;
		int idx;

		if (nr_dies >= size)
			break;

		aff3 = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 3);

		/* Skip dies we have already recorded */
		for (idx = 0; idx < nr_dies; idx++) {
			if (vm_aff3s[idx] == aff3)
				break;
		}

		if (idx == nr_dies)
			vm_aff3s[nr_dies++] = aff3;
	}

	return nr_dies;
}

/*
 * Re-compute the VM-wide SYS_LSUDVMBM_EL2 value from dvm_cpumask and
 * cache it in kvm->arch.lsudvmbm_el2.
 *
 * Encoding (as derived from the DVMBM_* definitions):
 *  - > 2 dies: RANGE = ALL_DIES, no per-cluster detail.
 *  - 1 die:    RANGE = ONE_DIE, die's Aff3 in DIE1, one bit per core
 *              at (aff2 * 4 + aff1) within bits [52:0].
 *  - 2 dies:   RANGE = TWO_DIES at cluster granularity, each die's
 *              Aff3 in DIE1/DIE2, one bit per cluster (aff2) in each
 *              die's cluster bitmap.
 *
 * Caller holds kvm->arch.dvm_lock.
 */
static void kvm_update_vm_lsudvmbm(struct kvm *kvm)
{
	u64 mpidr, aff3, aff2, aff1;
	u64 vm_aff3s[DVMBM_MAX_DIES];
	u64 val;
	int cpu, nr_dies;

	nr_dies = kvm_dvmbm_get_dies_info(kvm, vm_aff3s, DVMBM_MAX_DIES);

	/*
	 * dvm_cpumask should never be empty here, but if it were, the
	 * nr_dies == 2 path below would read uninitialized vm_aff3s[].
	 * Fall back to a single-die encoding covering no clusters.
	 */
	if (nr_dies == 0) {
		val = DVMBM_RANGE_ONE_DIE << DVMBM_RANGE_SHIFT;
		goto out_update;
	}

	if (nr_dies > 2) {
		val = DVMBM_RANGE_ALL_DIES << DVMBM_RANGE_SHIFT;
		goto out_update;
	}

	if (nr_dies == 1) {
		val = DVMBM_RANGE_ONE_DIE << DVMBM_RANGE_SHIFT	|
		      vm_aff3s[0] << DVMBM_DIE1_SHIFT;

		/* fulfill bits [52:0]: one bit per (cluster, core) */
		for_each_cpu(cpu, kvm->arch.dvm_cpumask) {
			mpidr = cpu_logical_map(cpu);
			aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
			aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1);

			val |= 1ULL << (aff2 * 4 + aff1);
		}

		goto out_update;
	}

	/* nr_dies == 2 */
	val = DVMBM_RANGE_TWO_DIES << DVMBM_RANGE_SHIFT	|
	      DVMBM_GRAN_CLUSTER << DVMBM_GRAN_SHIFT	|
	      vm_aff3s[0] << DVMBM_DIE1_SHIFT		|
	      vm_aff3s[1] << DVMBM_DIE2_SHIFT;

	/* and fulfill bits [43:0]: one bit per cluster in each die */
	for_each_cpu(cpu, kvm->arch.dvm_cpumask) {
		mpidr = cpu_logical_map(cpu);
		aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3);
		aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);

		if (aff3 == vm_aff3s[0])
			val |= 1ULL << (aff2 + DVMBM_DIE1_CLUSTER_SHIFT);
		else
			val |= 1ULL << (aff2 + DVMBM_DIE2_CLUSTER_SHIFT);
	}

out_update:
	kvm->arch.lsudvmbm_el2 = val;
}

void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
@@ -210,8 +301,10 @@ void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu)
	cpumask_copy(vcpu->arch.cpus_ptr, current->cpus_ptr);

	if (likely(cpumask_equal(vcpu->arch.cpus_ptr,
				 vcpu->arch.pre_cpus_ptr)))
				 vcpu->arch.pre_cpus_ptr))) {
		kvm_write_lsudvmbm(kvm);
		return;
	}

	/* Re-calculate dvm_cpumask for this VM */
	spin_lock(&kvm->arch.dvm_lock);
@@ -232,7 +325,21 @@ void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu)

	cpumask_copy(kvm->arch.dvm_cpumask, &mask);

	/*
	 * Perform a heavy invalidation for this VMID. Good place
	 * to optimize, right?
	 */
	kvm_flush_remote_tlbs(kvm);

	/*
	 * Re-calculate LSUDVMBM_EL2 for this VM and kick all vcpus
	 * out to reload the LSUDVMBM configuration.
	 */
	kvm_update_vm_lsudvmbm(kvm);
	kvm_make_all_cpus_request(kvm, KVM_REQ_RELOAD_DVMBM);

out_unlock:
	__kvm_write_lsudvmbm(kvm);
	spin_unlock(&kvm->arch.dvm_lock);
}

@@ -242,6 +349,12 @@ void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu)
		return;

	cpumask_copy(vcpu->arch.pre_cpus_ptr, vcpu->arch.cpus_ptr);

	/*
	 * We're pretty sure that host kernel runs at EL2 (as
	 * DVMBM is disabled in case of nVHE) and can't be affected
	 * by the configured SYS_LSUDVMBM_EL2.
	 */
}

int kvm_hisi_init_dvmbm(struct kvm *kvm)
@@ -264,3 +377,13 @@ void kvm_hisi_destroy_dvmbm(struct kvm *kvm)

	kfree(kvm->arch.dvm_cpumask);
}

/*
 * Handler for KVM_REQ_RELOAD_DVMBM: reload the (possibly just updated)
 * VM-wide LSUDVMBM value into SYS_LSUDVMBM_EL2 on this CPU.
 *
 * Preemption is disabled around the write so it lands on the physical
 * CPU the requesting vcpu is actually running on.
 */
void kvm_hisi_reload_lsudvmbm(struct kvm *kvm)
{
	/* The request should only ever be made when DVMBM is supported */
	if (WARN_ON_ONCE(!kvm_dvmbm_support))
		return;

	preempt_disable();
	kvm_write_lsudvmbm(kvm);
	preempt_enable();
}
+28 −0
Original line number Diff line number Diff line
@@ -19,6 +19,33 @@ enum hisi_cpu_type {
#define SYS_LSUDVM_CTRL_EL2	sys_reg(3, 4, 15, 7, 4)
#define LSUDVM_CTLR_EL2_MASK	BIT_ULL(0)

/*
 * MPIDR_EL1 layout on HIP09
 *
 * Aff3[7:3]	- socket ID	[0-15]
 * Aff3[2:0]	- die ID	[1,3]
 * Aff2		- cluster ID	[0-9]
 * Aff1		- core ID	[0-3]
 * Aff0		- thread ID	[0,1]
 */

/*
 * SYS_LSUDVMBM_EL2 field layout as used by kvm_update_vm_lsudvmbm():
 *
 *   [63:62] RANGE - how many dies the VM spans (one/two/all)
 *   [61]    GRAN  - granularity of the low bitmap (cluster or die)
 *   DIE1/DIE2     - Aff3 value of the first/second die
 *
 * The low bits hold presence bitmaps: a single-die VM sets one bit
 * per core at (aff2 * 4 + aff1) in [52:0]; a two-die VM sets one bit
 * per cluster (aff2) starting at each die's CLUSTER_SHIFT.
 */
#define SYS_LSUDVMBM_EL2		sys_reg(3, 4, 15, 7, 5)
#define DVMBM_RANGE_SHIFT		62
#define DVMBM_RANGE_ONE_DIE		0ULL
#define DVMBM_RANGE_TWO_DIES		1ULL
#define DVMBM_RANGE_ALL_DIES		3ULL

#define DVMBM_GRAN_SHIFT		61
#define DVMBM_GRAN_CLUSTER		0ULL
#define DVMBM_GRAN_DIE			1ULL

#define DVMBM_DIE1_SHIFT		53
#define DVMBM_DIE2_SHIFT		45
#define DVMBM_DIE1_CLUSTER_SHIFT	22
#define DVMBM_DIE2_CLUSTER_SHIFT	0

/* Upper bound on distinct dies collected by kvm_dvmbm_get_dies_info() */
#define DVMBM_MAX_DIES			32

void probe_hisi_cpu_type(void);
bool hisi_ncsnp_supported(void);
bool hisi_dvmbm_supported(void);
@@ -29,5 +56,6 @@ void kvm_hisi_dvmbm_load(struct kvm_vcpu *vcpu);
void kvm_hisi_dvmbm_put(struct kvm_vcpu *vcpu);
int kvm_hisi_init_dvmbm(struct kvm *kvm);
void kvm_hisi_destroy_dvmbm(struct kvm *kvm);
void kvm_hisi_reload_lsudvmbm(struct kvm *kvm);

#endif /* __HISI_VIRT_H__ */