Commit c5c8c7c5 authored by David Matlack, committed by Paolo Bonzini

KVM: x86/mmu: Make walk_shadow_page_lockless_{begin,end} interoperate with the TDP MMU



Acquire the RCU read lock in walk_shadow_page_lockless_begin and release
it in walk_shadow_page_lockless_end when the TDP MMU is enabled.  This
should not introduce any functional changes but is used in the following
commit to make fast_page_fault interoperate with the TDP MMU.

Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20210713220957.3493520-4-dmatlack@google.com>
[Use if...else instead of if(){return;}]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 61bcd360
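
For orientation, the caller-side pattern this change produces is sketched below; it is condensed from the get_mmio_spte() hunk later in this diff, not a verbatim excerpt of the tree:

	/*
	 * Sketch: a lockless SPTE walk is bracketed by the begin/end
	 * helpers, which now select the protection scheme for the
	 * active MMU: RCU for the TDP MMU, IRQ-off plus vcpu->mode
	 * for the shadow MMU.
	 */
	walk_shadow_page_lockless_begin(vcpu);

	if (is_tdp_mmu(vcpu->arch.mmu))
		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
	else
		leaf = get_walk(vcpu, addr, sptes, &root);

	walk_shadow_page_lockless_end(vcpu);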
arch/x86/kvm/mmu/mmu.c +31 −21
@@ -686,6 +686,9 @@ static bool mmu_spte_age(u64 *sptep)

static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
	if (is_tdp_mmu(vcpu->arch.mmu)) {
		kvm_tdp_mmu_walk_lockless_begin();
	} else {
		/*
		 * Prevent page table teardown by making any free-er wait during
		 * kvm_flush_remote_tlbs() IPI to all active vcpus.
@@ -698,9 +701,13 @@ static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
		 */
		smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
	}
}

static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
	if (is_tdp_mmu(vcpu->arch.mmu)) {
		kvm_tdp_mmu_walk_lockless_end();
	} else {
		/*
		 * Make sure the write to vcpu->mode is not reordered in front of
		 * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
@@ -709,6 +716,7 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
		smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
		local_irq_enable();
	}
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
{
@@ -3617,6 +3625,8 @@ static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 *
 * Must be called between walk_shadow_page_lockless_{begin,end}.
 */
static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
{
@@ -3624,8 +3634,6 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level
	int leaf = -1;
	u64 spte;

	walk_shadow_page_lockless_begin(vcpu);

	for (shadow_walk_init(&iterator, vcpu, addr),
	     *root_level = iterator.level;
	     shadow_walk_okay(&iterator);
@@ -3639,8 +3647,6 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level
			break;
	}

	walk_shadow_page_lockless_end(vcpu);

	return leaf;
}

@@ -3652,11 +3658,15 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
	int root, leaf, level;
	bool reserved = false;

	walk_shadow_page_lockless_begin(vcpu);

	if (is_tdp_mmu(vcpu->arch.mmu))
		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
	else
		leaf = get_walk(vcpu, addr, sptes, &root);

	walk_shadow_page_lockless_end(vcpu);

	if (unlikely(leaf < 0)) {
		*sptep = 0ull;
		return reserved;
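
Note why the bracketing moved out of get_walk() and into get_mmio_spte(): the caller dispatches to either walker, so a single begin/end pair has to wrap the dispatch itself. On the shadow-MMU side, what that pair buys is the IPI handshake with kvm_flush_remote_tlbs(); a hedged sketch of the scheme, paraphrasing the hunks above rather than quoting the zap path:

	/*
	 * Reader side (this diff): publish READING_SHADOW_PAGE_TABLES
	 * with a full barrier before touching SPTEs, so a zapper running
	 * kvm_flush_remote_tlbs() will IPI this vCPU and wait for an ack
	 * before freeing page tables out from under the walk.
	 */
	local_irq_disable();
	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
	/* ... read sptes ... */
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
	local_irq_enable();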
arch/x86/kvm/mmu/tdp_mmu.c +2 −4
@@ -1516,6 +1516,8 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 *
 * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
 */
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level)
@@ -1527,14 +1529,10 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,

	*root_level = vcpu->arch.mmu->shadow_root_level;

	rcu_read_lock();

	tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
		leaf = iter.level;
		sptes[leaf] = iter.old_spte;
	}

	rcu_read_unlock();

	return leaf;
}
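
With the rcu_read_lock()/rcu_read_unlock() pair hoisted out of kvm_tdp_mmu_get_walk(), correctness now rests on callers honoring the comment added above; the intended usage is simply:

	kvm_tdp_mmu_walk_lockless_begin();	/* takes rcu_read_lock() */
	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
	kvm_tdp_mmu_walk_lockless_end();	/* rcu_read_unlock() */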
arch/x86/kvm/mmu/tdp_mmu.h +10 −0
@@ -77,6 +77,16 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);

static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}

static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}

int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
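
As a design note, making the begin/end helpers static inlines in tdp_mmu.h keeps the fact that the TDP MMU relies on RCU (it defers freeing page-table pages until a grace period has elapsed) encapsulated behind named helpers, so mmu.c can pair them with the shadow-MMU scheme without knowing those details.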