Commit 9f87791d authored by Sean Christopherson, committed by David Woodhouse

KVM: Drop KVM's API to allow temporarily unmapping gfn=>pfn cache

Drop kvm_gpc_unmap() as it has no users and unclear requirements.  The
API was added as part of the original gfn_to_pfn_cache support, but its
sole usage[*] was never merged.  Fold the guts of kvm_gpc_unmap() into
the deactivate path and drop the API.  Omit acquiring refresh_lock, as
concurrent calls to kvm_gpc_deactivate() are not allowed (this is not
enforced, e.g. via lockdep, due to it being called during vCPU
destruction).
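
For illustration, such enforcement would boil down to an assertion
along the lines of the sketch below at the top of kvm_gpc_deactivate();
it is deliberately omitted because vCPU destruction reaches
deactivation without vcpu->mutex held (the assertion is hypothetical,
not part of this patch):

	/* Hypothetical only, NOT added by this patch. */
	if (gpc->vcpu)
		lockdep_assert_held(&gpc->vcpu->mutex);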

If/when temporary unmapping makes a comeback, the desirable behavior is
likely to restrict temporary unmapping to vCPU-exclusive mappings and
require that vcpu->mutex be held to serialize unmap.  Use of the
refresh_lock to protect unmapping was somewhat speculatively added by
commit 93984f19 ("KVM: Fully serialize gfn=>pfn cache refresh via
mutex") to guard against concurrent unmaps, but the primary use case of
the temporary unmap, nested virtualization[*], doesn't actually need or
want concurrent unmaps.

[*] https://lore.kernel.org/all/20211210163625.2886-7-dwmw2@infradead.org
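
If such an API does return, a minimal sketch of the likely shape, based
on the reasoning above (illustrative only: the name kvm_gpc_unmap_vcpu()
and its locking rule are assumptions, not merged code), would reuse the
guts of the old kvm_gpc_unmap() minus the refresh_lock:

static void kvm_gpc_unmap_vcpu(struct kvm_vcpu *vcpu,
			       struct gfn_to_pfn_cache *gpc)
{
	void *old_khva;
	kvm_pfn_t old_pfn;

	/*
	 * Hypothetical rule: vCPU-exclusive caches only, with vcpu->mutex
	 * serializing this unmap against other unmaps.
	 */
	lockdep_assert_held(&vcpu->mutex);

	write_lock_irq(&gpc->lock);
	gpc->valid = false;

	old_khva = gpc->khva - offset_in_page(gpc->khva);
	old_pfn = gpc->pfn;

	/*
	 * Leave the GPA => uHVA mapping cached; only the PFN lookup must
	 * be redone on the next refresh.
	 */
	gpc->khva = NULL;
	gpc->pfn = KVM_PFN_ERR_FAULT;
	write_unlock_irq(&gpc->lock);

	gpc_unmap_khva(old_pfn, old_khva);
}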

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
parent 0318f207
include/linux/kvm_host.h (+0 −12)
@@ -1333,18 +1333,6 @@ bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
  */
 int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
 
-/**
- * kvm_gpc_unmap - temporarily unmap a gfn_to_pfn_cache.
- *
- * @kvm:	   pointer to kvm instance.
- * @gpc:	   struct gfn_to_pfn_cache object.
- *
- * This unmaps the referenced page. The cache is left in the invalid state
- * but at least the mapping from GPA to userspace HVA will remain cached
- * and can be reused on a subsequent refresh.
- */
-void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
-
 /**
  * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
  *
virt/kvm/pfncache.c (+16 −28)
@@ -333,33 +333,6 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
 
-void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
-{
-	void *old_khva;
-	kvm_pfn_t old_pfn;
-
-	mutex_lock(&gpc->refresh_lock);
-	write_lock_irq(&gpc->lock);
-
-	gpc->valid = false;
-
-	old_khva = gpc->khva - offset_in_page(gpc->khva);
-	old_pfn = gpc->pfn;
-
-	/*
-	 * We can leave the GPA → uHVA map cache intact but the PFN
-	 * lookup will need to be redone even for the same page.
-	 */
-	gpc->khva = NULL;
-	gpc->pfn = KVM_PFN_ERR_FAULT;
-
-	write_unlock_irq(&gpc->lock);
-	mutex_unlock(&gpc->refresh_lock);
-
-	gpc_unmap_khva(old_pfn, old_khva);
-}
-EXPORT_SYMBOL_GPL(kvm_gpc_unmap);
-
 void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
 		  struct kvm_vcpu *vcpu, enum pfn_cache_usage usage)
 {
@@ -405,6 +378,8 @@ EXPORT_SYMBOL_GPL(kvm_gpc_activate);
 void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
 {
 	struct kvm *kvm = gpc->kvm;
+	kvm_pfn_t old_pfn;
+	void *old_khva;
 
 	if (gpc->active) {
 		/*
@@ -414,13 +389,26 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
 		 */
 		write_lock_irq(&gpc->lock);
 		gpc->active = false;
+		gpc->valid = false;
+
+		/*
+		 * Leave the GPA => uHVA cache intact, it's protected by the
+		 * memslot generation.  The PFN lookup needs to be redone every
+		 * time as mmu_notifier protection is lost when the cache is
+		 * removed from the VM's gpc_list.
+		 */
+		old_khva = gpc->khva - offset_in_page(gpc->khva);
+		gpc->khva = NULL;
+
+		old_pfn = gpc->pfn;
+		gpc->pfn = KVM_PFN_ERR_FAULT;
 		write_unlock_irq(&gpc->lock);
 
 		spin_lock(&kvm->gpc_lock);
 		list_del(&gpc->list);
 		spin_unlock(&kvm->gpc_lock);
 
-		kvm_gpc_unmap(kvm, gpc);
+		gpc_unmap_khva(old_pfn, old_khva);
 	}
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);