Commit a54aa15c authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Handle MMIO SPTEs directly in mmu_set_spte()



Now that it should be impossible to convert a valid SPTE to an MMIO SPTE,
handle MMIO SPTEs early in mmu_set_spte() without going through
set_spte() and all the logic for removing an existing, valid SPTE.
The other caller of set_spte(), FNAME(sync_page)(), explicitly handles
MMIO SPTEs prior to calling set_spte().
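
For reference, a rough sketch of the resulting shape of the top of mmu_set_spte()
after this change (condensed from the hunks below, not the literal patched function):

	/* Early out for MMIO: no memslot backs this gfn. */
	if (unlikely(is_noslot_pfn(pfn))) {
		/* Install an MMIO SPTE directly... */
		mark_mmio_spte(vcpu, sptep, gfn, pte_access);
		/* ...and ask the caller to emulate the access. */
		return RET_PF_EMULATE;
	}
	/* Otherwise continue down the normal set_spte()/make_spte() path. */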

This simplifies mmu_set_spte() and set_spte(), and also "fixes" an oddity
where MMIO SPTEs are traced by both trace_kvm_mmu_set_spte() and
trace_mark_mmio_spte().

Note, mmu_spte_set() will WARN if this new approach causes KVM to create
an MMIO SPTE overtop a valid SPTE.
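
For context, the WARN comes from mmu_spte_set() itself, which only installs a SPTE
when nothing valid is already present. A minimal sketch of that guard (assumed to
match the mmu.c helper of the time, not quoted verbatim):

	static void mmu_spte_set(u64 *sptep, u64 new_spte)
	{
		/* Creating a SPTE over an already-present SPTE is a bug. */
		WARN_ON(is_shadow_present_pte(*sptep));
		__set_spte(sptep, new_spte);
	}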

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210225204749.1512652-8-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 30ab5901
arch/x86/kvm/mmu/mmu.c: +5 −17
@@ -236,17 +236,6 @@ static unsigned get_mmio_spte_access(u64 spte)
 	return spte & shadow_mmio_access_mask;
 }
 
-static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
-			  kvm_pfn_t pfn, unsigned int access)
-{
-	if (unlikely(is_noslot_pfn(pfn))) {
-		mark_mmio_spte(vcpu, sptep, gfn, access);
-		return true;
-	}
-
-	return false;
-}
-
 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
 {
 	u64 kvm_gen, spte_gen, gen;
@@ -2563,9 +2552,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	struct kvm_mmu_page *sp;
 	int ret;
 
-	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
-		return 0;
-
 	sp = sptep_to_sp(sptep);
 
 	ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
@@ -2595,6 +2581,11 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
 		 *sptep, write_fault, gfn);
 
+	if (unlikely(is_noslot_pfn(pfn))) {
+		mark_mmio_spte(vcpu, sptep, gfn, pte_access);
+		return RET_PF_EMULATE;
+	}
+
 	if (is_shadow_present_pte(*sptep)) {
 		/*
 		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
@@ -2628,9 +2619,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
 				KVM_PAGES_PER_HPAGE(level));
 
-	if (unlikely(is_mmio_spte(*sptep)))
-		ret = RET_PF_EMULATE;
-
 	/*
 	 * The fault is fully spurious if and only if the new SPTE and old SPTE
 	 * are identical, and emulation is not required.