Commit 319d1a95 authored by Marc Zyngier
Browse files

Merge branch kvm-arm64/6.6/generic-vcpu into kvmarm-master/next



* kvm-arm64/6.6/generic-vcpu:
  : .
  : Cleanup the obsolete vcpu target abstraction, courtesy of Oliver.
  : From the cover letter:
  :
  : "kvm_vcpu_init::target is quite useless at this point. We don't do any
  : uarch-specific emulation in the first place, and require userspace
  : select the 'generic' vCPU target on all but a few implementations.
  :
  : Small series to (1) clean up usage of the target value in the kernel and
  : (2) switch to the 'generic' target on implementations that previously
  : had their own target values. The implementation-specific values are
  : still tolerated, though, to avoid UAPI breakage."
  : .
  KVM: arm64: Always return generic v8 as the preferred target
  KVM: arm64: Replace vCPU target with a configuration flag
  KVM: arm64: Remove pointless check for changed init target
  KVM: arm64: Delete pointless switch statement in kvm_reset_vcpu()

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents 6eaae198 5346f7e1
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -567,8 +567,7 @@ struct kvm_vcpu_arch {
	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	/* feature flags */
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
@@ -669,6 +668,8 @@ struct kvm_vcpu_arch {
#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
/* PTRAUTH exposed to guest */
#define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))
/* KVM_ARM_VCPU_INIT completed */
#define VCPU_INITIALIZED	__vcpu_single_flag(cflags, BIT(3))

/* Exception pending */
#define PENDING_EXCEPTION	__vcpu_single_flag(iflags, BIT(0))
@@ -899,7 +900,6 @@ struct kvm_vcpu_stat {
	u64 exits;
};

void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+11 −13
Original line number Diff line number Diff line
@@ -365,7 +365,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
#endif

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;
	vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
@@ -574,7 +574,7 @@ unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)

static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.target >= 0;
	return vcpu_get_flag(vcpu, VCPU_INITIALIZED);
}

/*
@@ -1058,7 +1058,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
			 * invalid. The VMM can try and fix it by issuing  a
			 * KVM_ARM_VCPU_INIT if it really wants to.
			 */
			vcpu->arch.target = -1;
			vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
			ret = ARM_EXCEPTION_IL;
		}

@@ -1219,8 +1219,7 @@ static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
{
	unsigned long features = init->features[0];

	return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES) ||
			vcpu->arch.target != init->target;
	return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
}

static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
@@ -1236,20 +1235,18 @@ static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
	    !bitmap_equal(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES))
		goto out_unlock;

	vcpu->arch.target = init->target;
	bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);

	/* Now we know what it is, we can reset it. */
	ret = kvm_reset_vcpu(vcpu);
	if (ret) {
		vcpu->arch.target = -1;
		bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
		goto out_unlock;
	}

	bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
	set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);

	vcpu_set_flag(vcpu, VCPU_INITIALIZED);
out_unlock:
	mutex_unlock(&kvm->arch.config_lock);
	return ret;
@@ -1260,14 +1257,15 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
{
	int ret;

	if (init->target != kvm_target_cpu())
	if (init->target != KVM_ARM_TARGET_GENERIC_V8 &&
	    init->target != kvm_target_cpu())
		return -EINVAL;

	ret = kvm_vcpu_init_check_features(vcpu, init);
	if (ret)
		return ret;

	if (vcpu->arch.target == -1)
	if (!kvm_vcpu_initialized(vcpu))
		return __kvm_vcpu_set_target(vcpu, init);

	if (kvm_vcpu_init_changed(vcpu, init))
@@ -1595,9 +1593,9 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
	}
	case KVM_ARM_PREFERRED_TARGET: {
		struct kvm_vcpu_init init;

		kvm_vcpu_preferred_target(&init);
		struct kvm_vcpu_init init = {
			.target = KVM_ARM_TARGET_GENERIC_V8,
		};

		if (copy_to_user(argp, &init, sizeof(init)))
			return -EFAULT;
+0 −15
Original line number Diff line number Diff line
@@ -884,21 +884,6 @@ u32 __attribute_const__ kvm_target_cpu(void)
	return KVM_ARM_TARGET_GENERIC_V8;
}

void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	u32 target = kvm_target_cpu();

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
+1 −1
Original line number Diff line number Diff line
@@ -236,7 +236,7 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
		 * KVM_ARM_VCPU_INIT, however, this is likely not possible for
		 * protected VMs.
		 */
		vcpu->arch.target = -1;
		vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
		*exit_code |= ARM_EXCEPTION_IL;
	}
+10 −15
Original line number Diff line number Diff line
@@ -248,22 +248,17 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
		}
	}

	switch (vcpu->arch.target) {
	default:
		if (vcpu_el1_is_32bit(vcpu)) {
	if (vcpu_el1_is_32bit(vcpu))
		pstate = VCPU_RESET_PSTATE_SVC;
		} else if (vcpu_has_nv(vcpu)) {
	else if (vcpu_has_nv(vcpu))
		pstate = VCPU_RESET_PSTATE_EL2;
		} else {
	else
		pstate = VCPU_RESET_PSTATE_EL1;
		}

	if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
		ret = -EINVAL;
		goto out;
	}
		break;
	}

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));