Commit 6b6fcd28 authored by Paolo Bonzini

kvm: x86: abstract locking around pvclock_update_vm_gtod_copy



Updates to the kvmclock parameters need to perform a complicated dance of
KVM_REQ_MCLOCK_INPROGRESS and KVM_REQ_CLOCK_UPDATE requests, in addition to
taking pvclock_gtod_sync_lock.  Move that dance into a pair of helper
functions, kvm_start_pvclock_update() and kvm_end_pvclock_update(), that can
be shared by the master clock update, KVM_SET_CLOCK, and Hyper-V
reenlightenment paths.
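
As an illustration (not part of the patch itself), a caller that must
update clock state under the lock now follows this shape; the function
name example_clock_update is made up for the example:

    /* example_clock_update is illustrative only, not added by this patch */
    static void example_clock_update(struct kvm *kvm)
    {
    	/*
    	 * Sends KVM_REQ_MCLOCK_INPROGRESS to all vCPUs and takes
    	 * pvclock_gtod_sync_lock: no guest entries from this point.
    	 */
    	kvm_start_pvclock_update(kvm);

    	pvclock_update_vm_gtod_copy(kvm);
    	/* ... any other updates that must be done under the lock ... */

    	/*
    	 * Drops the lock, queues KVM_REQ_CLOCK_UPDATE on every vCPU and
    	 * clears KVM_REQ_MCLOCK_INPROGRESS: guest entries allowed again.
    	 */
    	kvm_end_pvclock_update(kvm);
    }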

Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 3e44dce4
arch/x86/include/asm/kvm_host.h  +0 −1
@@ -1862,7 +1862,6 @@ u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier);
 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
 
-void kvm_make_mclock_inprogress_request(struct kvm *kvm);
 void kvm_make_scan_ioapic_request(struct kvm *kvm);
 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
 				       unsigned long *vcpu_bitmap);
arch/x86/kvm/x86.c  +29 −33
@@ -2743,35 +2743,42 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
 #endif
 }
 
-void kvm_make_mclock_inprogress_request(struct kvm *kvm)
+static void kvm_make_mclock_inprogress_request(struct kvm *kvm)
 {
 	kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
 }
 
-static void kvm_gen_update_masterclock(struct kvm *kvm)
+static void kvm_start_pvclock_update(struct kvm *kvm)
 {
-#ifdef CONFIG_X86_64
-	int i;
-	struct kvm_vcpu *vcpu;
 	struct kvm_arch *ka = &kvm->arch;
-	unsigned long flags;
-
-	kvm_hv_invalidate_tsc_page(kvm);
 
 	kvm_make_mclock_inprogress_request(kvm);
 
 	/* no guest entries from this point */
-	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
-	pvclock_update_vm_gtod_copy(kvm);
-	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+	spin_lock_irq(&ka->pvclock_gtod_sync_lock);
+}
+
+static void kvm_end_pvclock_update(struct kvm *kvm)
+{
+	struct kvm_arch *ka = &kvm->arch;
+	struct kvm_vcpu *vcpu;
+	int i;
 
+	spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
 	/* guest entries allowed */
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-#endif
+}
+
+static void kvm_update_masterclock(struct kvm *kvm)
+{
+	kvm_hv_invalidate_tsc_page(kvm);
+	kvm_start_pvclock_update(kvm);
+	pvclock_update_vm_gtod_copy(kvm);
+	kvm_end_pvclock_update(kvm);
 }
 
 u64 get_kvmclock_ns(struct kvm *kvm)
@@ -6067,12 +6074,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 
 		r = 0;
-		/*
-		 * TODO: userspace has to take care of races with VCPU_RUN, so
-		 * kvm_gen_update_masterclock() can be cut down to locked
-		 * pvclock_update_vm_gtod_copy().
-		 */
-		kvm_gen_update_masterclock(kvm);
+
+		kvm_hv_invalidate_tsc_page(kvm);
+		kvm_start_pvclock_update(kvm);
+		pvclock_update_vm_gtod_copy(kvm);
 
 		/*
 		 * This pairs with kvm_guest_time_update(): when masterclock is
@@ -6081,15 +6086,12 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		 * is slightly ahead) here we risk going negative on unsigned
 		 * 'system_time' when 'user_ns.clock' is very small.
 		 */
-		spin_lock_irq(&ka->pvclock_gtod_sync_lock);
 		if (kvm->arch.use_master_clock)
 			now_ns = ka->master_kernel_ns;
 		else
 			now_ns = get_kvmclock_base_ns();
 		ka->kvmclock_offset = user_ns.clock - now_ns;
-		spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
-
-		kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
+		kvm_end_pvclock_update(kvm);
 		break;
 	}
 	case KVM_GET_CLOCK: {
@@ -8102,14 +8104,13 @@ static void tsc_khz_changed(void *data)
 static void kvm_hyperv_tsc_notifier(void)
 {
 	struct kvm *kvm;
-	struct kvm_vcpu *vcpu;
 	int cpu;
-	unsigned long flags;
 
 	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_make_mclock_inprogress_request(kvm);
 
 	/* no guest entries from this point */
 	hyperv_stop_tsc_emulation();
 
 	/* TSC frequency always matches when on Hyper-V */
@@ -8120,16 +8121,11 @@ static void kvm_hyperv_tsc_notifier(void)
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		struct kvm_arch *ka = &kvm->arch;
 
-		spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+		spin_lock_irq(&ka->pvclock_gtod_sync_lock);
 		pvclock_update_vm_gtod_copy(kvm);
-		spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
-		kvm_for_each_vcpu(cpu, vcpu, kvm)
-			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
-
-		kvm_for_each_vcpu(cpu, vcpu, kvm)
-			kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
+		kvm_end_pvclock_update(kvm);
 	}
 
 	mutex_unlock(&kvm_lock);
 }
 #endif
@@ -9406,7 +9402,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
 			__kvm_migrate_timers(vcpu);
 		if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
-			kvm_gen_update_masterclock(vcpu->kvm);
+			kvm_update_masterclock(vcpu->kvm);
 		if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
 			kvm_gen_kvmclock_update(vcpu);
 		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {