Commit 274e0e4c authored by Ben Gardon, committed by Yu Zhang
Browse files

KVM: x86/mmu: Allow zapping collapsible SPTEs to use MMU read lock

mainline inclusion
from mainline-v5.13-rc1
commit 2db6f772
category: feature
bugzilla: https://gitee.com/openeuler/intel-kernel/issues/I7S3VQ
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=2db6f772b530eedcf69069e63dd7c4fdf05305fc



----------------------------------------------------------------------

To reduce the impact of disabling dirty logging, change the TDP MMU
function which zaps collapsible SPTEs to run under the MMU read lock.
This way, page faults on zapped SPTEs can proceed in parallel with
kvm_mmu_zap_collapsible_sptes.

Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20210401233736.638171-11-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
parent 18d66167
Loading
Loading
Loading
Loading
+10 −4
Original line number Diff line number Diff line
@@ -5695,13 +5695,19 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
	write_lock(&kvm->mmu_lock);
	flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);

	if (is_tdp_mmu_enabled(kvm))
		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);

	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, slot);

	write_unlock(&kvm->mmu_lock);

	if (is_tdp_mmu_enabled(kvm)) {
		flush = false;

		read_lock(&kvm->mmu_lock);
		flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
		if (flush)
			kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
		read_unlock(&kvm->mmu_lock);
	}
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+13 −4
Original line number Diff line number Diff line
@@ -1308,7 +1308,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
	rcu_read_lock();

	tdp_root_for_each_pte(iter, root, start, end) {
		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
retry:
		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
			flush = false;
			continue;
		}
@@ -1323,8 +1324,14 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
							    pfn, PG_LEVEL_NUM))
			continue;

		tdp_mmu_set_spte(kvm, &iter, 0);

		if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
			/*
			 * The iter must explicitly re-read the SPTE because
			 * the atomic cmpxchg failed.
			 */
			iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
			goto retry;
		}
		flush = true;
	}

@@ -1343,7 +1350,9 @@ bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
{
	struct kvm_mmu_page *root;

	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, false)
	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		flush = zap_collapsible_spte_range(kvm, root, slot, flush);

	return flush;