Commit aba3caef authored by Michal Luczaj, committed by Paolo Bonzini

KVM: Shorten gfn_to_pfn_cache function names

Formalize "gpc" as the acronym and use it in function names.

No functional change intended.
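
The renames, for reference:

  kvm_gfn_to_pfn_cache_check()    =>  kvm_gpc_check()
  kvm_gfn_to_pfn_cache_refresh()  =>  kvm_gpc_refresh()
  kvm_gfn_to_pfn_cache_unmap()    =>  kvm_gpc_unmap()

As an illustrative sketch of the pattern the callers below follow (error
handling and the actual payload access elided), a cache is checked under
its read lock and refreshed outside the lock on failure:

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gpc_check(kvm, gpc, gpc->gpa, len)) {
		read_unlock_irqrestore(&gpc->lock, flags);

		if (kvm_gpc_refresh(kvm, gpc, gpc->gpa, len))
			return;		/* refresh failed, bail out */

		read_lock_irqsave(&gpc->lock, flags);
	}
	/* ... access the mapped page through the cache ... */
	read_unlock_irqrestore(&gpc->lock, flags);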

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Michal Luczaj <mhal@rbox.co>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 8acc3518
arch/x86/kvm/x86.c  +4 −4
@@ -3037,11 +3037,11 @@ static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
 	unsigned long flags;
 
 	read_lock_irqsave(&gpc->lock, flags);
-	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
-					   offset + sizeof(*guest_hv_clock))) {
+	while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
+			      offset + sizeof(*guest_hv_clock))) {
 		read_unlock_irqrestore(&gpc->lock, flags);
 
-		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
-						 offset + sizeof(*guest_hv_clock)))
+		if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
+				    offset + sizeof(*guest_hv_clock)))
 			return;
 
arch/x86/kvm/xen.c  +15 −15
@@ -273,14 +273,14 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 	 * gfn_to_pfn caches that cover the region.
 	 */
 	read_lock_irqsave(&gpc1->lock, flags);
-	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc1, gpc1->gpa, user_len1)) {
+	while (!kvm_gpc_check(v->kvm, gpc1, gpc1->gpa, user_len1)) {
 		read_unlock_irqrestore(&gpc1->lock, flags);
 
 		/* When invoked from kvm_sched_out() we cannot sleep */
 		if (atomic)
 			return;
 
-		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc1, gpc1->gpa, user_len1))
+		if (kvm_gpc_refresh(v->kvm, gpc1, gpc1->gpa, user_len1))
 			return;
 
 		read_lock_irqsave(&gpc1->lock, flags);
@@ -309,7 +309,7 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
 		 */
 		read_lock(&gpc2->lock);
 
-		if (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc2, gpc2->gpa, user_len2)) {
+		if (!kvm_gpc_check(v->kvm, gpc2, gpc2->gpa, user_len2)) {
 			read_unlock(&gpc2->lock);
 			read_unlock_irqrestore(&gpc1->lock, flags);
 
@@ -489,11 +489,11 @@ void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
 	 * little more honest about it.
 	 */
 	read_lock_irqsave(&gpc->lock, flags);
-	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
-					   sizeof(struct vcpu_info))) {
+	while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
+			      sizeof(struct vcpu_info))) {
 		read_unlock_irqrestore(&gpc->lock, flags);
 
-		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
-						 sizeof(struct vcpu_info)))
+		if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
+				    sizeof(struct vcpu_info)))
 			return;
 
@@ -554,7 +554,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 		     sizeof_field(struct compat_vcpu_info, evtchn_upcall_pending));
 
 	read_lock_irqsave(&gpc->lock, flags);
-	while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
-					   sizeof(struct vcpu_info))) {
+	while (!kvm_gpc_check(v->kvm, gpc, gpc->gpa,
+			      sizeof(struct vcpu_info))) {
 		read_unlock_irqrestore(&gpc->lock, flags);
 
@@ -569,7 +569,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
 		if (in_atomic() || !task_is_running(current))
 			return 1;
 
-		if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
-						 sizeof(struct vcpu_info))) {
+		if (kvm_gpc_refresh(v->kvm, gpc, gpc->gpa,
+				    sizeof(struct vcpu_info))) {
 			/*
 			 * If this failed, userspace has screwed up the
@@ -1167,7 +1167,7 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
 
 	read_lock_irqsave(&gpc->lock, flags);
 	idx = srcu_read_lock(&kvm->srcu);
-	if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
+	if (!kvm_gpc_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
 		goto out_rcu;
 
 	ret = false;
@@ -1564,7 +1564,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 	idx = srcu_read_lock(&kvm->srcu);
 
 	read_lock_irqsave(&gpc->lock, flags);
-	if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
+	if (!kvm_gpc_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
 		goto out_rcu;
 
 	if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
@@ -1598,7 +1598,7 @@ int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 		gpc = &vcpu->arch.xen.vcpu_info_cache;
 
 		read_lock_irqsave(&gpc->lock, flags);
-		if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, sizeof(struct vcpu_info))) {
+		if (!kvm_gpc_check(kvm, gpc, gpc->gpa, sizeof(struct vcpu_info))) {
 			/*
 			 * Could not access the vcpu_info. Set the bit in-kernel
 			 * and prod the vCPU to deliver it for itself.
@@ -1696,7 +1696,7 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
 			break;
 
 		idx = srcu_read_lock(&kvm->srcu);
-		rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
+		rc = kvm_gpc_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
 		srcu_read_unlock(&kvm->srcu, idx);
 	} while(!rc);
 
include/linux/kvm_host.h  +10 −11
@@ -1288,16 +1288,15 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc);
  *                 -EFAULT for an untranslatable guest physical address.
  *
  * This primes a gfn_to_pfn_cache and links it into the @kvm's list for
- * invalidations to be processed.  Callers are required to use
- * kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before
- * accessing the target page.
+ * invalidations to be processed.  Callers are required to use kvm_gpc_check()
+ * to ensure that the cache is valid before accessing the target page.
  */
 int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 		     struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
 		     gpa_t gpa, unsigned long len);
 
 /**
- * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
+ * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
  *
  * @kvm:	   pointer to kvm instance.
  * @gpc:	   struct gfn_to_pfn_cache object.
@@ -1314,11 +1313,11 @@ int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
  * Callers in IN_GUEST_MODE may do so without locking, although they should
  * still hold a read lock on kvm->scru for the memslot checks.
  */
-bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-				gpa_t gpa, unsigned long len);
+bool kvm_gpc_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+		   unsigned long len);
 
 /**
- * kvm_gfn_to_pfn_cache_refresh - update a previously initialized cache.
+ * kvm_gpc_refresh - update a previously initialized cache.
  *
  * @kvm:	   pointer to kvm instance.
  * @gpc:	   struct gfn_to_pfn_cache object.
@@ -1335,11 +1334,11 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
  * still lock and check the cache status, as this function does not return
  * with the lock still held to permit access.
  */
-int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-				 gpa_t gpa, unsigned long len);
+int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+		    unsigned long len);
 
 /**
- * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache.
+ * kvm_gpc_unmap - temporarily unmap a gfn_to_pfn_cache.
  *
  * @kvm:	   pointer to kvm instance.
  * @gpc:	   struct gfn_to_pfn_cache object.
@@ -1348,7 +1347,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
  * but at least the mapping from GPA to userspace HVA will remain cached
  * and can be reused on a subsequent refresh.
  */
-void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
+void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
 
 /**
  * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
virt/kvm/pfncache.c  +10 −10
@@ -76,8 +76,8 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 	}
 }
 
-bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-				gpa_t gpa, unsigned long len)
+bool kvm_gpc_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+		   unsigned long len)
 {
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 
@@ -96,7 +96,7 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 
 	return true;
 }
-EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check);
+EXPORT_SYMBOL_GPL(kvm_gpc_check);
 
 static void gpc_unmap_khva(struct kvm *kvm, kvm_pfn_t pfn, void *khva)
 {
@@ -238,8 +238,8 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 	return -EFAULT;
 }
 
-int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-				 gpa_t gpa, unsigned long len)
+int kvm_gpc_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+		    unsigned long len)
 {
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	unsigned long page_offset = gpa & ~PAGE_MASK;
@@ -333,9 +333,9 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_refresh);
+EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
 
-void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 {
 	void *old_khva;
 	kvm_pfn_t old_pfn;
@@ -360,7 +360,7 @@ void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 
 	gpc_unmap_khva(kvm, old_pfn, old_khva);
 }
-EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);
+EXPORT_SYMBOL_GPL(kvm_gpc_unmap);
 
 void kvm_gpc_init(struct gfn_to_pfn_cache *gpc)
 {
@@ -396,7 +396,7 @@ int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 		gpc->active = true;
 		write_unlock_irq(&gpc->lock);
 	}
-	return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
+	return kvm_gpc_refresh(kvm, gpc, gpa, len);
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_activate);
 
@@ -416,7 +416,7 @@ void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 		list_del(&gpc->list);
 		spin_unlock(&kvm->gpc_lock);
 
-		kvm_gfn_to_pfn_cache_unmap(kvm, gpc);
+		kvm_gpc_unmap(kvm, gpc);
 	}
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);
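
For context, a minimal sketch of the full cache lifecycle with the
shortened names (a hypothetical caller, not part of this commit; the
KVM_HOST_USES_PFN usage flag is an assumed value of enum pfn_cache_usage
from the kvm_gpc_activate() signature above):

	struct gfn_to_pfn_cache gpc;

	kvm_gpc_init(&gpc);
	/* Prime the cache and link it into @kvm's invalidation list. */
	if (kvm_gpc_activate(kvm, &gpc, vcpu, KVM_HOST_USES_PFN, gpa, len))
		return;	/* e.g. -EFAULT for an untranslatable GPA */

	/* Access the page via the kvm_gpc_check()/kvm_gpc_refresh() loop. */

	/* Unlink and unmap when done; this calls kvm_gpc_unmap() internally. */
	kvm_gpc_deactivate(kvm, &gpc);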