Commit 86394287 authored by Bibo Mao, committed by Xianglai Li
Browse files

LoongArch: KVM: Add vcpu mapping from physical cpuid

mainline inclusion
from mainline-v6.10-rc1
commit 73516e9da512adc63ba3859fbd82a21f6257348f
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IAZJDO


CVE: NA

--------------------------------

Physical CPUID is used for interrupt routing for irqchips such as ipi,
msgint and eiointc interrupt controllers. Physical CPUID is stored at
the CSR register LOONGARCH_CSR_CPUID, it can not be changed once vcpu
is created and the physical CPUIDs of two vcpus cannot be the same.

Different irqchips have different size declaration about physical CPUID,
the max CPUID value for CSR LOONGARCH_CSR_CPUID on Loongson-3A5000 is
512, the max CPUID supported by IPI hardware is 1024, while for eiointc
irqchip is 256, and for msgint irqchip is 65536.

The smallest value from all interrupt controllers is selected now, and
the max cpuid size is defined as 256 by KVM, which comes from the eiointc
irqchip.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
Signed-off-by: Xianglai Li <lixianglai@loongson.cn>
parent 32b9aca3
Loading
Loading
Loading
Loading
+6 −5
Original line number Diff line number Diff line
@@ -107,12 +107,13 @@ struct kvm_world_switch {
#define MAX_PGTABLE_LEVELS	4

/*
 * Physical cpu id is used for interrupt routing, there are different
 * Physical CPUID is used for interrupt routing, there are different
 * definitions about physical cpuid on different hardwares.
 *  For LOONGARCH_CSR_CPUID register, max cpuid size if 512
 *  For IPI HW, max dest CPUID size 1024
 *  For extioi interrupt controller, max dest CPUID size is 256
 *  For MSI interrupt controller, max supported CPUID size is 65536
 *
 *  For LOONGARCH_CSR_CPUID register, max CPUID size if 512
 *  For IPI hardware, max destination CPUID size 1024
 *  For extioi interrupt controller, max destination CPUID size is 256
 *  For msgint interrupt controller, max supported CPUID size is 65536
 *
 * Currently max CPUID is defined as 256 for KVM hypervisor, in future
 * it will be expanded to 4096, including 16 packages at most. And every
+81 −81
Original line number Diff line number Diff line
@@ -447,98 +447,78 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
	return 0;
}

static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
{
	unsigned long gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_ESTAT) {
		preempt_disable();
		vcpu_load(vcpu);
		/*
		 * Sync pending interrupts into ESTAT so that interrupt
		 * remains during VM migration stage
		 */
		kvm_deliver_intr(vcpu);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
		vcpu_put(vcpu);
		preempt_enable();

		/* ESTAT IP0~IP7 get from GINTC */
		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
		return 0;
	}

	/*
	 * Get software CSR state since software state is consistent
	 * with hardware for synchronous ioctl
	 */
	*val = kvm_read_sw_gcsr(csr, id);

	return 0;
}

static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
{
	int cpuid;
	struct loongarch_csrs *csr = vcpu->arch.csr;
	struct kvm_phyid_map *map;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (val >= KVM_MAX_PHYID)
		return -EINVAL;

	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	map = vcpu->kvm->arch.phyid_map;
	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);

	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
	if (map->phys_map[cpuid].enabled) {
		/*
		 * Cpuid is already set before
		 * Forbid changing different cpuid at runtime
		 */
		if (cpuid != val) {
	if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
		/* Discard duplicated CPUID set operation */
		if (cpuid == val) {
			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
			return 0;
		}

		/*
			 * Cpuid 0 is initial value for vcpu, maybe invalid
			 * unset value for vcpu
		 * CPUID is already set before
		 * Forbid changing to a different CPUID at runtime
		 */
			if (cpuid) {
		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
		return -EINVAL;
	}
		} else {
			 /* Discard duplicated cpuid set */

	if (map->phys_map[val].enabled) {
		/* Discard duplicated CPUID set operation */
		if (vcpu == map->phys_map[val].vcpu) {
			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
			return 0;
		}
	}

	if (map->phys_map[val].enabled) {
		/*
		 * New cpuid is already set with other vcpu
		 * Forbid sharing the same cpuid between different vcpus
		 * New CPUID is already set with other vcpu
		 * Forbid sharing the same CPUID between different vcpus
		 */
		if (map->phys_map[val].vcpu != vcpu) {
		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
		return -EINVAL;
	}

		/* Discard duplicated cpuid set operation*/
		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
		return 0;
	}

	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
	map->phys_map[val].enabled	= true;
	map->phys_map[val].vcpu		= vcpu;
	if (map->max_phyid < val)
		map->max_phyid = val;
	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);

	return 0;
}

static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
{
	int cpuid;
	struct kvm_phyid_map *map;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	map = vcpu->kvm->arch.phyid_map;
	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);

	if (cpuid >= KVM_MAX_PHYID)
		return;

	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
	if (map->phys_map[cpuid].enabled) {
		map->phys_map[cpuid].vcpu = NULL;
		map->phys_map[cpuid].enabled = false;
		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
	}
	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
}

struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
{
	struct kvm_phyid_map *map;
@@ -547,28 +527,45 @@ struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
		return NULL;

	map = kvm->arch.phyid_map;
	if (map->phys_map[cpuid].enabled)
		return map->phys_map[cpuid].vcpu;

	if (!map->phys_map[cpuid].enabled)
		return NULL;

	return map->phys_map[cpuid].vcpu;
}

static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
{
	int cpuid;
	unsigned long gintc;
	struct loongarch_csrs *csr = vcpu->arch.csr;
	struct kvm_phyid_map  *map;

	map = vcpu->kvm->arch.phyid_map;
	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	if (cpuid >= KVM_MAX_PHYID)
		return;
	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (map->phys_map[cpuid].enabled) {
		map->phys_map[cpuid].vcpu = NULL;
		map->phys_map[cpuid].enabled = false;
		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, 0);
	if (id == LOONGARCH_CSR_ESTAT) {
		preempt_disable();
		vcpu_load(vcpu);
		/*
		 * Sync pending interrupts into ESTAT so that interrupt
		 * remains during VM migration stage
		 */
		kvm_deliver_intr(vcpu);
		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
		vcpu_put(vcpu);
		preempt_enable();

		/* ESTAT IP0~IP7 get from GINTC */
		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
		return 0;
	}

	/*
	 * Get software CSR state since software state is consistent
	 * with hardware for synchronous ioctl
	 */
	*val = kvm_read_sw_gcsr(csr, id);

	return 0;
}

static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
@@ -579,6 +576,9 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
	if (get_gcsr_flag(id) & INVALID_GCSR)
		return -EINVAL;

	if (id == LOONGARCH_CSR_CPUID)
		return kvm_set_cpuid(vcpu, val);

	if (id == LOONGARCH_CSR_ESTAT) {
		/* ESTAT IP0~IP7 inject through GINTC */
		gintc = (val >> 2) & 0xff;
@@ -588,8 +588,7 @@ static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);

		return ret;
	} else if (id == LOONGARCH_CSR_CPUID)
		return kvm_set_cpuid(vcpu, val);
	}

	kvm_write_sw_gcsr(csr, id, val);

@@ -1491,6 +1490,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)

	/* Set cpuid */
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);

	/* Start with no pending virtual guest interrupts */
	csr->csrs[LOONGARCH_CSR_GINTC] = 0;
@@ -1509,8 +1509,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)

	hrtimer_cancel(&vcpu->arch.swtimer);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	kfree(vcpu->arch.csr);
	kvm_drop_cpuid(vcpu);
	kfree(vcpu->arch.csr);

	/*
	 * If the vCPU is freed and reused as another vCPU, we don't want the
+3 −4
Original line number Diff line number Diff line
@@ -33,13 +33,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
	if (!kvm->arch.pgd)
		return -ENOMEM;

	kvm->arch.phyid_map = kvzalloc(sizeof(struct kvm_phyid_map),
				GFP_KERNEL_ACCOUNT);
	kvm->arch.phyid_map = kvzalloc(sizeof(struct kvm_phyid_map), GFP_KERNEL_ACCOUNT);
	if (!kvm->arch.phyid_map) {
		free_page((unsigned long)kvm->arch.pgd);
		kvm->arch.pgd = NULL;
		return -ENOMEM;
	}
	spin_lock_init(&kvm->arch.phyid_map_lock);

	kvm_init_vmcs(kvm);

@@ -61,7 +61,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
	for (i = 0; i <= kvm->arch.root_level; i++)
		kvm->arch.pte_shifts[i] = PAGE_SHIFT + i * (PAGE_SHIFT - 3);

	spin_lock_init(&kvm->arch.phyid_map_lock);
	return 0;
}

@@ -69,8 +68,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_destroy_vcpus(kvm);
	free_page((unsigned long)kvm->arch.pgd);
	kvfree(kvm->arch.phyid_map);
	kvm->arch.pgd = NULL;
	kvfree(kvm->arch.phyid_map);
	kvm->arch.phyid_map = NULL;
}