Commit 5276c616 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Add RET_PF_CONTINUE to eliminate bool+int* "returns"

Add RET_PF_CONTINUE and use it in handle_abnormal_pfn() and
kvm_faultin_pfn() to signal that the page fault handler should continue
doing its thing.  Aside from being gross and inefficient, using a boolean
return to signal continue vs. stop makes it extremely difficult to add
more helpers and/or move existing code to a helper.

E.g. hypothetically, if nested MMUs were to gain a separate page fault
handler in the future, everything up to the "is self-modifying PTE" check
can be shared by all shadow MMUs, but communicating up the stack whether
to continue on or stop becomes a nightmare.

More concretely, proposed support for private guest memory ran into a
similar issue, where it'll be forced to forgo a helper in order to yield
sane code: https://lore.kernel.org/all/YkJbxiL%2FAz7olWlq@google.com.

No functional change intended.

Cc: David Matlack <dmatlack@google.com>
Cc: Chao Peng <chao.p.peng@linux.intel.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220423034752.1161007-7-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 5c64aba5
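Before the diffs, a minimal standalone sketch of the conversion the message describes (simplified stand-ins, not the kernel's types or helpers; the RET_PF_* values mirror the enum touched below). The old convention returns a bool meaning "stop?" and smuggles the real status through an int out-param; the new convention folds both into a single RET_PF_* return that callers simply propagate:

#include <stdio.h>

enum { RET_PF_CONTINUE = 0, RET_PF_RETRY, RET_PF_EMULATE };

/* Old style: the bool answers "stop?", the status travels via *ret_val. */
static _Bool old_helper(int pfn_is_bad, int *ret_val)
{
	if (pfn_is_bad) {
		*ret_val = RET_PF_EMULATE;
		return 1;			/* stop */
	}
	return 0;				/* continue */
}

/* New style: a single return carries both "stop?" and the status. */
static int new_helper(int pfn_is_bad)
{
	if (pfn_is_bad)
		return RET_PF_EMULATE;
	return RET_PF_CONTINUE;
}

int main(void)
{
	int r;

	if (old_helper(1, &r))			/* two outputs to keep in sync */
		printf("old: stop, r = %d\n", r);

	r = new_helper(1);			/* one value to propagate */
	if (r != RET_PF_CONTINUE)
		printf("new: stop, r = %d\n", r);
	return 0;
}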
arch/x86/kvm/mmu/mmu.c  +22 −29
@@ -3007,14 +3007,12 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
	return -EFAULT;
}

-static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
-				unsigned int access, int *ret_val)
+static int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
+			       unsigned int access)
{
	/* The pfn is invalid, report the error! */
-	if (unlikely(is_error_pfn(fault->pfn))) {
-		*ret_val = kvm_handle_bad_page(vcpu, fault->gfn, fault->pfn);
-		return true;
-	}
+	if (unlikely(is_error_pfn(fault->pfn)))
+		return kvm_handle_bad_page(vcpu, fault->gfn, fault->pfn);

	if (unlikely(!fault->slot)) {
		gva_t gva = fault->is_tdp ? 0 : fault->addr;
@@ -3032,13 +3030,11 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fa
		 * the hardware's).
		 */
		if (unlikely(!enable_mmio_caching) ||
-		    unlikely(fault->gfn > kvm_mmu_max_gfn())) {
-			*ret_val = RET_PF_EMULATE;
-			return true;
-		}
+		    unlikely(fault->gfn > kvm_mmu_max_gfn()))
+			return RET_PF_EMULATE;
	}

-	return false;
+	return RET_PF_CONTINUE;
}

static bool page_fault_can_be_fast(struct kvm_page_fault *fault)
@@ -3946,7 +3942,7 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}

-static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, int *r)
+static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
	struct kvm_memory_slot *slot = fault->slot;
	bool async;
@@ -3957,7 +3953,7 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
	 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
	 */
	if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
-		goto out_retry;
+		return RET_PF_RETRY;

	if (!kvm_is_visible_memslot(slot)) {
		/* Don't expose private memslots to L2. */
@@ -3965,7 +3961,7 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
			fault->slot = NULL;
			fault->pfn = KVM_PFN_NOSLOT;
			fault->map_writable = false;
-			return false;
+			return RET_PF_CONTINUE;
		}
		/*
		 * If the APIC access page exists but is disabled, go directly
@@ -3974,10 +3970,8 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
		 * when the AVIC is re-enabled.
		 */
		if (slot && slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT &&
-		    !kvm_apicv_activated(vcpu->kvm)) {
-			*r = RET_PF_EMULATE;
-			return true;
-		}
+		    !kvm_apicv_activated(vcpu->kvm))
+			return RET_PF_EMULATE;
	}

	async = false;
@@ -3985,26 +3979,23 @@ static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
					  fault->write, &fault->map_writable,
					  &fault->hva);
	if (!async)
-		return false; /* *pfn has correct page already */
+		return RET_PF_CONTINUE; /* *pfn has correct page already */

	if (!fault->prefetch && kvm_can_do_async_pf(vcpu)) {
		trace_kvm_try_async_get_page(fault->addr, fault->gfn);
		if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
			trace_kvm_async_pf_doublefault(fault->addr, fault->gfn);
			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
-			goto out_retry;
-		} else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn))
-			goto out_retry;
+			return RET_PF_RETRY;
+		} else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn)) {
+			return RET_PF_RETRY;
+		}
	}

	fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, NULL,
					  fault->write, &fault->map_writable,
					  &fault->hva);
-	return false;
-
-out_retry:
-	*r = RET_PF_RETRY;
-	return true;
+	return RET_PF_CONTINUE;
}

/*
@@ -4059,10 +4050,12 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

-	if (kvm_faultin_pfn(vcpu, fault, &r))
+	r = kvm_faultin_pfn(vcpu, fault);
+	if (r != RET_PF_CONTINUE)
		return r;

-	if (handle_abnormal_pfn(vcpu, fault, ACC_ALL, &r))
+	r = handle_abnormal_pfn(vcpu, fault, ACC_ALL);
+	if (r != RET_PF_CONTINUE)
		return r;

	r = RET_PF_RETRY;
arch/x86/kvm/mmu/mmu_internal.h  +8 −1
@@ -143,6 +143,7 @@ unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);
/*
 * Return values of handle_mmio_page_fault, mmu.page_fault, and fast_page_fault().
 *
+ * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
@@ -151,9 +152,15 @@ unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);
 *
 * Any names added to this enum should be exported to userspace for use in
 * tracepoints via TRACE_DEFINE_ENUM() in mmutrace.h
+ *
+ * Note, all values must be greater than or equal to zero so as not to encroach
+ * on -errno return values.  Somewhat arbitrarily use '0' for CONTINUE, which
+ * will allow for efficient machine code when checking for CONTINUE, e.g.
+ * "TEST %rax, %rax, JNZ", as all "stop!" values are non-zero.
 */
enum {
-	RET_PF_RETRY = 0,
+	RET_PF_CONTINUE = 0,
+	RET_PF_RETRY,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
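As a rough illustration of the codegen note in the comment above (the exact instructions are compiler-dependent; this is an assumption about typical x86-64 output, not part of the commit), making CONTINUE the zero value turns every "bail unless continue" check into a plain zero test:

enum { RET_PF_CONTINUE = 0, RET_PF_RETRY, RET_PF_EMULATE };

/* "r != RET_PF_CONTINUE" is "r != 0", which compilers can emit as
 * TEST %eax,%eax / JNE with no immediate operand, since every "stop!"
 * value is non-zero.  A non-zero CONTINUE would need a CMP against a
 * constant instead. */
int propagate(int r)
{
	if (r != RET_PF_CONTINUE)
		return r;	/* stop: hand RET_PF_* up the stack */
	/* ... keep handling the fault ... */
	return RET_PF_RETRY;
}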
arch/x86/kvm/mmu/mmutrace.h  +1 −0
@@ -54,6 +54,7 @@
	{ PFERR_RSVD_MASK, "RSVD" },	\
	{ PFERR_FETCH_MASK, "F" }

+TRACE_DEFINE_ENUM(RET_PF_CONTINUE);
TRACE_DEFINE_ENUM(RET_PF_RETRY);
TRACE_DEFINE_ENUM(RET_PF_EMULATE);
TRACE_DEFINE_ENUM(RET_PF_INVALID);
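The TRACE_DEFINE_ENUM() export exists because trace format strings reach userspace tooling that only sees raw numbers. As a standalone analogue (plain C with hypothetical names, mimicking what the kernel's __print_symbolic() does inside a tracepoint), resolving a recorded RET_PF_* value back to its name looks like:

#include <stdio.h>

enum { RET_PF_CONTINUE = 0, RET_PF_RETRY, RET_PF_EMULATE, RET_PF_INVALID };

/* Userspace stand-in for __print_symbolic(): map the numeric value a
 * tracepoint records to the name that TRACE_DEFINE_ENUM() exports. */
static const char *ret_pf_name(int val)
{
	static const struct { int val; const char *name; } syms[] = {
		{ RET_PF_CONTINUE, "RET_PF_CONTINUE" },
		{ RET_PF_RETRY,    "RET_PF_RETRY" },
		{ RET_PF_EMULATE,  "RET_PF_EMULATE" },
		{ RET_PF_INVALID,  "RET_PF_INVALID" },
	};
	unsigned int i;

	for (i = 0; i < sizeof(syms) / sizeof(syms[0]); i++)
		if (syms[i].val == val)
			return syms[i].name;
	return "UNKNOWN";
}

int main(void)
{
	printf("page fault returned %s\n", ret_pf_name(RET_PF_RETRY));
	return 0;
}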
arch/x86/kvm/mmu/paging_tmpl.h  +4 −2
@@ -838,10 +838,12 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

-	if (kvm_faultin_pfn(vcpu, fault, &r))
+	r = kvm_faultin_pfn(vcpu, fault);
+	if (r != RET_PF_CONTINUE)
		return r;

-	if (handle_abnormal_pfn(vcpu, fault, walker.pte_access, &r))
+	r = handle_abnormal_pfn(vcpu, fault, walker.pte_access);
+	if (r != RET_PF_CONTINUE)
		return r;

	/*