Commit a795cd43 authored by David Woodhouse's avatar David Woodhouse Committed by Paolo Bonzini
Browse files

KVM: x86/xen: Use gfn_to_pfn_cache for runstate area

Convert the vcpu_xen runstate_cache from a gfn_to_hva_cache to a
gfn_to_pfn_cache, dropping the separate runstate_set flag in favour of
the cache's own 'active' state, and add a kvm_xen_destroy_vcpu() hook
(called from kvm_arch_vcpu_destroy()) to tear the cache down.

Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220303154127.202856-4-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 249f3249
Loading
Loading
Loading
Loading
+1 −2
Original line number Original line Diff line number Diff line
@@ -608,10 +608,9 @@ struct kvm_vcpu_xen {
	u32 current_runstate;
	u32 current_runstate;
	bool vcpu_info_set;
	bool vcpu_info_set;
	bool vcpu_time_info_set;
	bool vcpu_time_info_set;
	bool runstate_set;
	struct gfn_to_hva_cache vcpu_info_cache;
	struct gfn_to_hva_cache vcpu_info_cache;
	struct gfn_to_hva_cache vcpu_time_info_cache;
	struct gfn_to_hva_cache vcpu_time_info_cache;
	struct gfn_to_hva_cache runstate_cache;
	struct gfn_to_pfn_cache runstate_cache;
	u64 last_steal;
	u64 last_steal;
	u64 runstate_entry_time;
	u64 runstate_entry_time;
	u64 runstate_times[4];
	u64 runstate_times[4];
+1 −0
Original line number Original line Diff line number Diff line
@@ -11316,6 +11316,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
	fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
	fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);


	kvm_xen_destroy_vcpu(vcpu);
	kvm_hv_vcpu_uninit(vcpu);
	kvm_hv_vcpu_uninit(vcpu);
	kvm_pmu_destroy(vcpu);
	kvm_pmu_destroy(vcpu);
	kfree(vcpu->arch.mce_banks);
	kfree(vcpu->arch.mce_banks);
+52 −55
Original line number Original line Diff line number Diff line
@@ -133,27 +133,36 @@ static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
{
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	struct gfn_to_hva_cache *ghc = &vx->runstate_cache;
	struct gfn_to_pfn_cache *gpc = &vx->runstate_cache;
	struct kvm_memslots *slots = kvm_memslots(v->kvm);
	uint64_t *user_times;
	bool atomic = (state == RUNSTATE_runnable);
	unsigned long flags;
	uint64_t state_entry_time;
	size_t user_len;
	int __user *user_state;
	int *user_state;
	uint64_t __user *user_times;


	kvm_xen_update_runstate(v, state);
	kvm_xen_update_runstate(v, state);


	if (!vx->runstate_set)
	if (!vx->runstate_cache.active)
		return;
		return;


	if (unlikely(slots->generation != ghc->generation || kvm_is_error_hva(ghc->hva)) &&
	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode)
	    kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len))
		user_len = sizeof(struct vcpu_runstate_info);
	else
		user_len = sizeof(struct compat_vcpu_runstate_info);

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
					   user_len)) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* When invoked from kvm_sched_out() we cannot sleep */
		if (state == RUNSTATE_runnable)
			return;
			return;


	/* We made sure it fits in a single page */
		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, user_len))
	BUG_ON(!ghc->memslot);
			return;


	if (atomic)
		read_lock_irqsave(&gpc->lock, flags);
		pagefault_disable();
	}


	/*
	/*
	 * The only difference between 32-bit and 64-bit versions of the
	 * The only difference between 32-bit and 64-bit versions of the
@@ -167,38 +176,33 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
	 */
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
	user_state = (int __user *)ghc->hva;

	BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
	BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);

	user_times = (uint64_t __user *)(ghc->hva +
					 offsetof(struct compat_vcpu_runstate_info,
						  state_entry_time));
#ifdef CONFIG_X86_64
#ifdef CONFIG_X86_64
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
		     offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) + 4);
		     offsetof(struct compat_vcpu_runstate_info, time) + 4);

	if (v->kvm->arch.xen.long_mode)
		user_times = (uint64_t __user *)(ghc->hva +
						 offsetof(struct vcpu_runstate_info,
							  state_entry_time));
#endif
#endif

	user_state = gpc->khva;

	if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode)
		user_times = gpc->khva + offsetof(struct vcpu_runstate_info,
						  state_entry_time);
	else
		user_times = gpc->khva + offsetof(struct compat_vcpu_runstate_info,
						  state_entry_time);

	/*
	/*
	 * First write the updated state_entry_time at the appropriate
	 * First write the updated state_entry_time at the appropriate
	 * location determined by 'offset'.
	 * location determined by 'offset'.
	 */
	 */
	state_entry_time = vx->runstate_entry_time;
	state_entry_time |= XEN_RUNSTATE_UPDATE;

	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, state_entry_time) !=
		     sizeof(state_entry_time));
		     sizeof(user_times[0]));
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
		     sizeof(state_entry_time));
		     sizeof(user_times[0]));


	if (__put_user(state_entry_time, user_times))
	user_times[0] = vx->runstate_entry_time | XEN_RUNSTATE_UPDATE;
		goto out;
	smp_wmb();
	smp_wmb();


	/*
	/*
@@ -212,8 +216,7 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
	BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
		     sizeof(vx->current_runstate));
		     sizeof(vx->current_runstate));


	if (__put_user(vx->current_runstate, user_state))
	*user_state = vx->current_runstate;
		goto out;


	/*
	/*
	 * Write the actual runstate times immediately after the
	 * Write the actual runstate times immediately after the
@@ -228,23 +231,19 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
	BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
		     sizeof(vx->runstate_times));
		     sizeof(vx->runstate_times));


	if (__copy_to_user(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times)))
	memcpy(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times));
		goto out;
	smp_wmb();
	smp_wmb();


	/*
	/*
	 * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
	 * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
	 * runstate_entry_time field.
	 * runstate_entry_time field.
	 */
	 */
	state_entry_time &= ~XEN_RUNSTATE_UPDATE;
	user_times[0] &= ~XEN_RUNSTATE_UPDATE;
	__put_user(state_entry_time, user_times);
	smp_wmb();
	smp_wmb();


 out:
	read_unlock_irqrestore(&gpc->lock, flags);
	mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);


	if (atomic)
	mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
		pagefault_enable();
}
}


int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
@@ -507,24 +506,16 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
			break;
			break;
		}
		}
		if (data->u.gpa == GPA_INVALID) {
		if (data->u.gpa == GPA_INVALID) {
			vcpu->arch.xen.runstate_set = false;
			kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
						     &vcpu->arch.xen.runstate_cache);
			r = 0;
			r = 0;
			break;
			break;
		}
		}


		/* It must fit within a single page */
		r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
		if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_runstate_info) > PAGE_SIZE) {
			r = -EINVAL;
			break;
		}

		r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.runstate_cache,
					      &vcpu->arch.xen.runstate_cache,
					      data->u.gpa,
					      NULL, KVM_HOST_USES_PFN, data->u.gpa,
					      sizeof(struct vcpu_runstate_info));
					      sizeof(struct vcpu_runstate_info));
		if (!r) {
			vcpu->arch.xen.runstate_set = true;
		}
		break;
		break;


	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
@@ -659,7 +650,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
			r = -EOPNOTSUPP;
			r = -EOPNOTSUPP;
			break;
			break;
		}
		}
		if (vcpu->arch.xen.runstate_set) {
		if (vcpu->arch.xen.runstate_cache.active) {
			data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
			data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
			r = 0;
			r = 0;
		}
		}
@@ -1056,3 +1047,9 @@ int kvm_xen_setup_evtchn(struct kvm *kvm,


	return 0;
	return 0;
}
}

void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
				     &vcpu->arch.xen.runstate_cache);
}
+5 −1
Original line number Original line Diff line number Diff line
@@ -23,7 +23,7 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data);
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_init_vm(struct kvm *kvm);
void kvm_xen_init_vm(struct kvm *kvm);
void kvm_xen_destroy_vm(struct kvm *kvm);
void kvm_xen_destroy_vm(struct kvm *kvm);

void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu);
int kvm_xen_set_evtchn_fast(struct kvm_kernel_irq_routing_entry *e,
int kvm_xen_set_evtchn_fast(struct kvm_kernel_irq_routing_entry *e,
			    struct kvm *kvm);
			    struct kvm *kvm);
int kvm_xen_setup_evtchn(struct kvm *kvm,
int kvm_xen_setup_evtchn(struct kvm *kvm,
@@ -65,6 +65,10 @@ static inline void kvm_xen_destroy_vm(struct kvm *kvm)
{
{
}
}


static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
}

static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
{
	return false;
	return false;