Commit 6e8eb206 authored by David Matlack, committed by Paolo Bonzini

KVM: x86/mmu: fast_page_fault support for the TDP MMU



Make fast_page_fault interoperate with the TDP MMU by leveraging
walk_shadow_page_lockless_{begin,end} to acquire the RCU read lock and
introducing a new helper function kvm_tdp_mmu_fast_pf_get_last_sptep to
grab the lowest level sptep.

Suggested-by: Ben Gardon <bgardon@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20210713220957.3493520-5-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c5c8c7c5
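
For context, a rough sketch of how the walk_shadow_page_lockless_{begin,end} wrappers referenced in the commit message are expected to dispatch between the two MMUs. This is based on the parent commit; the exact bodies and comments below are illustrative and not part of this patch:

static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
	if (is_tdp_mmu(vcpu->arch.mmu)) {
		/* TDP MMU SPTEs are protected by RCU, not by disabling IRQs. */
		kvm_tdp_mmu_walk_lockless_begin();	/* rcu_read_lock() */
	} else {
		/*
		 * Prevent page table teardown by making any free-er wait
		 * during the kvm_flush_remote_tlbs() IPI to all active vcpus.
		 */
		local_irq_disable();

		/*
		 * Make sure a following spte read is not reordered ahead of
		 * the write to vcpu->mode.
		 */
		smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
	}
}

static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
	if (is_tdp_mmu(vcpu->arch.mmu)) {
		kvm_tdp_mmu_walk_lockless_end();	/* rcu_read_unlock() */
	} else {
		/*
		 * Make sure the write to vcpu->mode is not reordered in front
		 * of reads to sptes; a free-er could otherwise see the vCPU as
		 * OUTSIDE_GUEST_MODE and free the page tables being walked.
		 */
		smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
		local_irq_enable();
	}
}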
arch/x86/kvm/mmu/mmu.c  +37 −13
@@ -3108,15 +3108,41 @@ static bool is_access_allowed(u32 fault_err_code, u64 spte)
	return spte & PT_PRESENT_MASK;
}

/*
 * Returns the last level spte pointer of the shadow page walk for the given
 * gpa, and sets *spte to the spte value. This spte may be non-present. If no
 * walk could be performed, returns NULL and *spte does not contain valid data.
 *
 * Contract:
 *  - Must be called between walk_shadow_page_lockless_{begin,end}.
 *  - The returned sptep must not be used after walk_shadow_page_lockless_end.
 */
static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 old_spte;
	u64 *sptep = NULL;

	for_each_shadow_entry_lockless(vcpu, gpa, iterator, old_spte) {
		sptep = iterator.sptep;
		*spte = old_spte;

		if (!is_shadow_present_pte(old_spte))
			break;
	}

	return sptep;
}

/*
 * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
 */
static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
{
-	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int ret = RET_PF_INVALID;
	u64 spte = 0ull;
+	u64 *sptep = NULL;
	uint retry_count = 0;

	if (!page_fault_can_be_fast(error_code))
@@ -3127,14 +3153,15 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
	do {
		u64 new_spte;

-		for_each_shadow_entry_lockless(vcpu, gpa, iterator, spte)
-			if (!is_shadow_present_pte(spte))
-				break;
+		if (is_tdp_mmu(vcpu->arch.mmu))
+			sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, gpa, &spte);
+		else
+			sptep = fast_pf_get_last_sptep(vcpu, gpa, &spte);

		if (!is_shadow_present_pte(spte))
			break;

-		sp = sptep_to_sp(iterator.sptep);
+		sp = sptep_to_sp(sptep);
		if (!is_last_spte(spte, sp->role.level))
			break;

@@ -3192,8 +3219,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
		 * since the gfn is not stable for indirect shadow page. See
		 * Documentation/virt/kvm/locking.rst to get more detail.
		 */
-		if (fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte,
-					    new_spte)) {
+		if (fast_pf_fix_direct_spte(vcpu, sp, sptep, spte, new_spte)) {
			ret = RET_PF_FIXED;
			break;
		}
@@ -3206,7 +3232,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)

	} while (true);

-	trace_fast_page_fault(vcpu, gpa, error_code, iterator.sptep, spte, ret);
+	trace_fast_page_fault(vcpu, gpa, error_code, sptep, spte, ret);
	walk_shadow_page_lockless_end(vcpu);

	return ret;
@@ -3841,11 +3867,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
	if (page_fault_handle_page_track(vcpu, error_code, gfn))
		return RET_PF_EMULATE;

-	if (!is_tdp_mmu_fault) {
-		r = fast_page_fault(vcpu, gpa, error_code);
-		if (r != RET_PF_INVALID)
-			return r;
-	}
+	r = fast_page_fault(vcpu, gpa, error_code);
+	if (r != RET_PF_INVALID)
+		return r;

	r = mmu_topup_memory_caches(vcpu, false);
	if (r)
arch/x86/kvm/mmu/tdp_mmu.c  +41 −0
@@ -527,6 +527,10 @@ static inline bool tdp_mmu_set_spte_atomic_no_dirty_log(struct kvm *kvm,
	if (is_removed_spte(iter->old_spte))
		return false;

	/*
	 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
	 * does not hold the mmu_lock.
	 */
	if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
		      new_spte) != iter->old_spte)
		return false;
@@ -1536,3 +1540,40 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,

	return leaf;
}

/*
 * Returns the last level spte pointer of the shadow page walk for the given
 * gpa, and sets *spte to the spte value. This spte may be non-present. If no
 * walk could be performed, returns NULL and *spte does not contain valid data.
 *
 * Contract:
 *  - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
 *  - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
 *
 * WARNING: This function is only intended to be called during fast_page_fault.
 */
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte)
{
	struct tdp_iter iter;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	gfn_t gfn = addr >> PAGE_SHIFT;
	tdp_ptep_t sptep = NULL;

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		*spte = iter.old_spte;
		sptep = iter.sptep;
	}

	/*
	 * Perform the rcu_dereference to get the raw spte pointer value since
	 * we are passing it up to fast_page_fault, which is shared with the
	 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
	 * annotation.
	 *
	 * This is safe since fast_page_fault obeys the contracts of this
	 * function as well as all TDP MMU contracts around modifying SPTEs
	 * outside of mmu_lock.
	 */
	return rcu_dereference(sptep);
}
arch/x86/kvm/mmu/tdp_mmu.h  +2 −0
@@ -89,6 +89,8 @@ static inline void kvm_tdp_mmu_walk_lockless_end(void)

int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte);

#ifdef CONFIG_X86_64
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);