Commit e308c24a authored by Michal Luczaj, committed by David Woodhouse
Browse files

KVM: Use gfn_to_pfn_cache's immutable "kvm" in kvm_gpc_check()



Make kvm_gpc_check() use kvm instance cached in gfn_to_pfn_cache.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Michal Luczaj <mhal@rbox.co>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
parent 8c82a0b3
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -3035,7 +3035,7 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
	while (!kvm_gpc_check(gpc, gpc->gpa,
			      offset + sizeof(*guest_hv_clock))) {
		read_unlock_irqrestore(&gpc->lock, flags);

+7 −9
Original line number Diff line number Diff line
@@ -272,7 +272,7 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
	 * gfn_to_pfn caches that cover the region.
	 */
	read_lock_irqsave(&gpc1->lock, flags);
	while (!kvm_gpc_check(v->kvm, gpc1, gpc1->gpa, user_len1)) {
	while (!kvm_gpc_check(gpc1, gpc1->gpa, user_len1)) {
		read_unlock_irqrestore(&gpc1->lock, flags);

		/* When invoked from kvm_sched_out() we cannot sleep */
@@ -308,7 +308,7 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
		 */
		read_lock(&gpc2->lock);

		if (!kvm_gpc_check(v->kvm, gpc2, gpc2->gpa, user_len2)) {
		if (!kvm_gpc_check(gpc2, gpc2->gpa, user_len2)) {
			read_unlock(&gpc2->lock);
			read_unlock_irqrestore(&gpc1->lock, flags);

@@ -488,8 +488,7 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
	 * little more honest about it.
	 */
	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
			      sizeof(struct vcpu_info))) {
	while (!kvm_gpc_check(gpc, gpc->gpa, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
@@ -553,8 +552,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
			      sizeof(struct vcpu_info))) {
	while (!kvm_gpc_check(gpc, gpc->gpa, sizeof(struct vcpu_info))) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/*
@@ -1158,7 +1156,7 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,

	read_lock_irqsave(&gpc->lock, flags);
	idx = srcu_read_lock(&kvm->srcu);
	if (!kvm_gpc_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
	if (!kvm_gpc_check(gpc, gpc->gpa, PAGE_SIZE))
		goto out_rcu;

	ret = false;
@@ -1580,7 +1578,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
	idx = srcu_read_lock(&kvm->srcu);

	read_lock_irqsave(&gpc->lock, flags);
	if (!kvm_gpc_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
	if (!kvm_gpc_check(gpc, gpc->gpa, PAGE_SIZE))
		goto out_rcu;

	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
@@ -1614,7 +1612,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
		gpc = &vcpu->arch.xen.vcpu_info_cache;

		read_lock_irqsave(&gpc->lock, flags);
		if (!kvm_gpc_check(kvm, gpc, gpc->gpa, sizeof(struct vcpu_info))) {
		if (!kvm_gpc_check(gpc, gpc->gpa, sizeof(struct vcpu_info))) {
			/*
			 * Could not access the vcpu_info. Set the bit in-kernel
			 * and prod the vCPU to deliver it for itself.
+1 −3
Original line number Diff line number Diff line
@@ -1298,7 +1298,6 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
/**
 * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
 *
 * @kvm:	   pointer to kvm instance.
 * @gpc:	   struct gfn_to_pfn_cache object.
 * @gpa:	   current guest physical address to map.
 * @len:	   sanity check; the range being accessed must fit within a single page.
@@ -1313,8 +1312,7 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
 * Callers in IN_GUEST_MODE may do so without locking, although they should
 * still hold a read lock on kvm->srcu for the memslot checks.
 */
bool kvm_gpc_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
		   unsigned long len);
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);

/**
 * kvm_gpc_refresh - update a previously initialized cache.
+2 −3
Original line number Diff line number Diff line
@@ -76,10 +76,9 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
	}
}

bool kvm_gpc_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
		   unsigned long len)
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);

	if (!gpc->active)
		return false;