Commit d0d96121 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: Use enum to track if cached PFN will be used in guest and/or host

Replace the guest_uses_pa and kernel_map booleans in the PFN cache code
with a unified enum/bitmask. Using explicit names makes it easier to
review and audit call sites.

Opportunistically add a WARN to prevent passing garbage; instantiating a
cache without declaring its usage is either buggy or pointless.
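
For illustration only, a minimal sketch of a call site under the new scheme; the caller and its locals (ret, kvm, gpc, vcpu, gpa) are hypothetical, while the enum values and the kvm_gfn_to_pfn_cache_init() signature are the ones introduced by this patch:

	/* Hypothetical call site: the cached PFN is consumed by the guest
	 * while it runs and is also dereferenced by KVM, so both usage
	 * bits are declared.
	 */
	ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, vcpu, KVM_GUEST_AND_HOST_USE_PFN,
					gpa, PAGE_SIZE, false);

Passing 0, or any bit outside KVM_GUEST_AND_HOST_USE_PFN, trips the WARN_ON_ONCE() added below in kvm_gfn_to_pfn_cache_init().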

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220303154127.202856-2-dwmw2@infradead.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 4a9e7b9e
+1 −1
@@ -39,7 +39,7 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
 	}
 
 	do {
-		ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, false, true,
+		ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
 						gpa, PAGE_SIZE, false);
 		if (ret)
 			goto out;
+8 −8
@@ -1231,11 +1231,12 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
  * @gpc:	   struct gfn_to_pfn_cache object.
  * @vcpu:	   vCPU to be used for marking pages dirty and to be woken on
  *		   invalidation.
- * @guest_uses_pa: indicates that the resulting host physical PFN is used while
- *		   @vcpu is IN_GUEST_MODE; invalidations of the cache from MMU
- *		   notifiers (but not for KVM memslot changes!) will also force
- *		   @vcpu to exit the guest to refresh the cache.
- * @kernel_map:    requests a kernel virtual mapping (kmap / memremap).
+ * @usage:	   indicates if the resulting host physical PFN is used while
+ *		   the @vcpu is IN_GUEST_MODE (in which case invalidation of
+ *		   the cache from MMU notifiers---but not for KVM memslot
+ *		   changes!---will also force @vcpu to exit the guest and
+ *		   refresh the cache); and/or if the PFN used directly
+ *		   by KVM (and thus needs a kernel virtual mapping).
  * @gpa:	   guest physical address to map.
  * @len:	   sanity check; the range being access must fit a single page.
  * @dirty:         mark the cache dirty immediately.
@@ -1250,9 +1251,8 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
  * accessing the target page.
  */
 int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-			      struct kvm_vcpu *vcpu, bool guest_uses_pa,
-			      bool kernel_map, gpa_t gpa, unsigned long len,
-			      bool dirty);
+			      struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+			      gpa_t gpa, unsigned long len, bool dirty);
 
 /**
  * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
+8 −2
@@ -18,6 +18,7 @@ struct kvm_memslots;

 enum kvm_mr_change;
 
+#include <linux/bits.h>
 #include <linux/types.h>
 #include <linux/spinlock_types.h>

@@ -46,6 +47,12 @@ typedef u64 hfn_t;

 typedef hfn_t kvm_pfn_t;
 
+enum pfn_cache_usage {
+	KVM_GUEST_USES_PFN = BIT(0),
+	KVM_HOST_USES_PFN  = BIT(1),
+	KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
+};
+
 struct gfn_to_hva_cache {
 	u64 generation;
 	gpa_t gpa;
@@ -64,11 +71,10 @@ struct gfn_to_pfn_cache {
 	rwlock_t lock;
 	void *khva;
 	kvm_pfn_t pfn;
+	enum pfn_cache_usage usage;
 	bool active;
 	bool valid;
 	bool dirty;
-	bool kernel_map;
-	bool guest_uses_pa;
 };
 
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
+7 −7
@@ -42,7 +42,7 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
 			 * If a guest vCPU could be using the physical address,
 			 * it needs to be forced out of guest mode.
 			 */
-			if (gpc->guest_uses_pa) {
+			if (gpc->usage & KVM_GUEST_USES_PFN) {
 				if (!evict_vcpus) {
 					evict_vcpus = true;
 					bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
@@ -224,7 +224,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 			goto map_done;
 		}
 
-		if (gpc->kernel_map) {
+		if (gpc->usage & KVM_HOST_USES_PFN) {
 			if (new_pfn == old_pfn) {
 				new_khva = old_khva;
 				old_pfn = KVM_PFN_ERR_FAULT;
@@ -304,10 +304,11 @@ EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);


 int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
-			      struct kvm_vcpu *vcpu, bool guest_uses_pa,
-			      bool kernel_map, gpa_t gpa, unsigned long len,
-			      bool dirty)
+			      struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+			      gpa_t gpa, unsigned long len, bool dirty)
 {
+	WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
+
 	if (!gpc->active) {
 		rwlock_init(&gpc->lock);

@@ -315,8 +316,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 		gpc->pfn = KVM_PFN_ERR_FAULT;
 		gpc->uhva = KVM_HVA_ERR_BAD;
 		gpc->vcpu = vcpu;
-		gpc->kernel_map = kernel_map;
-		gpc->guest_uses_pa = guest_uses_pa;
+		gpc->usage = usage;
 		gpc->valid = false;
 		gpc->active = true;