Commit 0c2c7c06 authored by Peter Gonda, committed by Paolo Bonzini
Browse files

KVM: SEV: Mark nested locking of vcpu->lock



svm_vm_migrate_from() uses sev_lock_vcpus_for_migration() to lock all
source and target vcpu->locks. Unfortunately there is an 8 subclass
limit, so a new subclass cannot be used for each vCPU. Instead maintain
ownership of the first vcpu's mutex.dep_map using a role specific
subclass: source vs target. Release the other vcpu's mutex.dep_maps.

Fixes: b5663931 ("KVM: SEV: Add support for SEV intra host migration")
Reported-by: John Sperbeck <jsperbeck@google.com>
Suggested-by: David Rientjes <rientjes@google.com>
Suggested-by: Sean Christopherson <seanjc@google.com>
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Peter Gonda <pgonda@google.com>

Message-Id: <20220502165807.529624-1-pgonda@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 04144108
Loading
Loading
Loading
Loading
+38 −4
Original line number Diff line number Diff line
@@ -1594,24 +1594,51 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
	atomic_set_release(&src_sev->migration_in_progress, 0);
}

/* vCPU mutex subclasses.  */
enum sev_migration_role {
	SEV_MIGRATION_SOURCE = 0,
	SEV_MIGRATION_TARGET,
	SEV_NR_MIGRATION_ROLES,
};

static int sev_lock_vcpus_for_migration(struct kvm *kvm)
static int sev_lock_vcpus_for_migration(struct kvm *kvm,
					enum sev_migration_role role)
{
	struct kvm_vcpu *vcpu;
	unsigned long i, j;
	bool first = true;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mutex_lock_killable(&vcpu->mutex))
		if (mutex_lock_killable_nested(&vcpu->mutex, role))
			goto out_unlock;

		if (first) {
			/*
			 * Reset the role to one that avoids colliding with
			 * the role used for the first vcpu mutex.
			 */
			role = SEV_NR_MIGRATION_ROLES;
			first = false;
		} else {
			mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
		}
	}

	return 0;

out_unlock:

	first = true;
	kvm_for_each_vcpu(j, vcpu, kvm) {
		if (i == j)
			break;

		if (first)
			first = false;
		else
			mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);


		mutex_unlock(&vcpu->mutex);
	}
	return -EINTR;
@@ -1621,8 +1648,15 @@ static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;
	bool first = true;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (first)
			first = false;
		else
			mutex_acquire(&vcpu->mutex.dep_map,
				      SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);

		mutex_unlock(&vcpu->mutex);
	}
}
@@ -1748,10 +1782,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
		charged = true;
	}

	ret = sev_lock_vcpus_for_migration(kvm);
	ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
	if (ret)
		goto out_dst_cgroup;
	ret = sev_lock_vcpus_for_migration(source_kvm);
	ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
	if (ret)
		goto out_dst_vcpu;