Commit 4b6ef1ea authored by Sean Christopherson, committed by Yu Zhang
Browse files

KVM: x86/mmu: Coalesce TDP MMU TLB flushes when zapping collapsible SPTEs

mainline inclusion
from mainline-v5.13-rc1
commit af95b53e
category: feature
bugzilla: https://gitee.com/openeuler/intel-kernel/issues/I7S3VQ
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=af95b53e56e34a4df343cec32b3a3276d9d06ad3



----------------------------------------------------------------------

When zapping collapsible SPTEs across multiple roots, gather pending
flushes and perform a single remote TLB flush at the end, as opposed to
flushing after processing every root.

Note, flush may be cleared by the result of zap_collapsible_spte_range().
This is intended and correct, e.g. yielding may have serviced a prior
pending flush.

Cc: Ben Gardon <bgardon@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210326021957.1424875-2-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
parent f01d0909
Loading
Loading
Loading
Loading
+13 −9
Original line number Original line Diff line number Diff line
@@ -1325,21 +1325,21 @@ bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
 * Clear leaf entries which could be replaced by large mappings, for
 * Clear leaf entries which could be replaced by large mappings, for
 * GFNs within the slot.
 * GFNs within the slot.
 */
 */
static void zap_collapsible_spte_range(struct kvm *kvm,
static bool zap_collapsible_spte_range(struct kvm *kvm,
				       struct kvm_mmu_page *root,
				       struct kvm_mmu_page *root,
				       struct kvm_memory_slot *slot)
				       struct kvm_memory_slot *slot,
				       bool flush)
{
{
	gfn_t start = slot->base_gfn;
	gfn_t start = slot->base_gfn;
	gfn_t end = start + slot->npages;
	gfn_t end = start + slot->npages;
	struct tdp_iter iter;
	struct tdp_iter iter;
	kvm_pfn_t pfn;
	kvm_pfn_t pfn;
	bool spte_set = false;


	rcu_read_lock();
	rcu_read_lock();


	tdp_root_for_each_pte(iter, root, start, end) {
	tdp_root_for_each_pte(iter, root, start, end) {
		if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) {
		if (tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
			spte_set = false;
			flush = false;
			continue;
			continue;
		}
		}


@@ -1355,12 +1355,12 @@ static void zap_collapsible_spte_range(struct kvm *kvm,


		tdp_mmu_set_spte(kvm, &iter, 0);
		tdp_mmu_set_spte(kvm, &iter, 0);


		spte_set = true;
		flush = true;
	}
	}


	rcu_read_unlock();
	rcu_read_unlock();
	if (spte_set)

		kvm_flush_remote_tlbs(kvm);
	return flush;
}
}


/*
/*
@@ -1371,6 +1371,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       struct kvm_memory_slot *slot)
				       struct kvm_memory_slot *slot)
{
{
	struct kvm_mmu_page *root;
	struct kvm_mmu_page *root;
	bool flush = false;
	int root_as_id;
	int root_as_id;


	for_each_tdp_mmu_root_yield_safe(kvm, root) {
	for_each_tdp_mmu_root_yield_safe(kvm, root) {
@@ -1378,8 +1379,11 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
		if (root_as_id != slot->as_id)
		if (root_as_id != slot->as_id)
			continue;
			continue;


		zap_collapsible_spte_range(kvm, root, slot);
		flush = zap_collapsible_spte_range(kvm, root, slot, flush);
	}
	}

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}
}


/*
/*