Commit 510958e9 authored by Sean Christopherson, committed by Paolo Bonzini
Browse files

KVM: Force PPC to define its own rcuwait object



Do not define/reference kvm_vcpu.wait if __KVM_HAVE_ARCH_WQP is true, and
instead force the architecture (PPC) to define its own rcuwait object.
Allowing common KVM to directly access vcpu->wait without a guard makes
it all too easy to introduce potential bugs, e.g. kvm_vcpu_block(),
kvm_vcpu_on_spin(), and async_pf_execute() all operate on vcpu->wait, not
the result of kvm_arch_vcpu_get_wait(), and so may do the wrong thing for
PPC.

Due to PPC's shenanigans with respect to callbacks and waits (it switches
to the virtual core's wait object at KVM_RUN!?!?), it's not clear whether
or not this fixes any bugs.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211009021236.4122790-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 6f390916
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -749,6 +749,7 @@ struct kvm_vcpu_arch {
	u8 irq_pending; /* Used by XIVE to signal pending guest irqs */
	u32 last_inst;

	struct rcuwait wait;
	struct rcuwait *waitp;
	struct kvmppc_vcore *vcore;
	int ret;
+2 −1
Original line number Diff line number Diff line
@@ -753,7 +753,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
	if (err)
		goto out_vcpu_uninit;

	vcpu->arch.waitp = &vcpu->wait;
	rcuwait_init(&vcpu->arch.wait);
	vcpu->arch.waitp = &vcpu->arch.wait;
	kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
	return 0;

+2 −0
Original line number Diff line number Diff line
@@ -314,7 +314,9 @@ struct kvm_vcpu {
	struct mutex mutex;
	struct kvm_run *run;

#ifndef __KVM_HAVE_ARCH_WQP
	struct rcuwait wait;
#endif
	struct pid __rcu *pid;
	int sigset_active;
	sigset_t sigset;
+1 −1
Original line number Diff line number Diff line
@@ -85,7 +85,7 @@ static void async_pf_execute(struct work_struct *work)

	trace_kvm_async_pf_completed(addr, cr2_or_gpa);

	rcuwait_wake_up(&vcpu->wait);
	rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));

	mmput(mm);
	kvm_put_kvm(vcpu->kvm);
+6 −3
Original line number Diff line number Diff line
@@ -422,7 +422,9 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	vcpu->pid = NULL;
#ifndef __KVM_HAVE_ARCH_WQP
	rcuwait_init(&vcpu->wait);
#endif
	kvm_async_pf_vcpu_init(vcpu);

	vcpu->pre_pcpu = -1;
@@ -3284,6 +3286,7 @@ update_halt_poll_stats(struct kvm_vcpu *vcpu, u64 poll_ns, bool waited)
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
	bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
	ktime_t start, cur, poll_end;
	bool waited = false;
@@ -3322,7 +3325,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
	}


	prepare_to_rcuwait(&vcpu->wait);
	prepare_to_rcuwait(wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

@@ -3332,7 +3335,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
		waited = true;
		schedule();
	}
	finish_rcuwait(&vcpu->wait);
	finish_rcuwait(wait);
	cur = ktime_get();
	if (waited) {
		vcpu->stat.generic.halt_wait_ns +=
@@ -3544,7 +3547,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
				continue;
			if (vcpu == me)
				continue;
			if (rcuwait_active(&vcpu->wait) &&
			if (rcuwait_active(kvm_arch_vcpu_get_wait(vcpu)) &&
			    !vcpu_dy_runnable(vcpu))
				continue;
			if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&