Commit ded1d73f authored by Sean Christopherson, committed by Yu Zhang

KVM: x86/mmu: Coalesce TLB flushes across address spaces for gfn range zap

mainline inclusion
from mainline-v5.13-rc1
commit 1a61b7db
category: feature
bugzilla: https://gitee.com/openeuler/intel-kernel/issues/I7S3VQ
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=1a61b7db7a8dc44ce5010926ed48b519dda92d84



----------------------------------------------------------------------

Gather pending TLB flushes across both address spaces when zapping a
given gfn range.  This requires feeding "flush" back into subsequent
calls, but on the plus side sets the stage for further batching
between the legacy MMU and TDP MMU.  It also allows refactoring the
address space iteration to cover the legacy and TDP MMUs without
introducing truly ugly code.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210326021957.1424875-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

conflict:
	arch/x86/kvm/mmu/tdp_mmu.h

Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
parent 1b834210
Diff (+8 −9):
@@ -5338,10 +5338,10 @@ typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_
 static __always_inline bool
 slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
 			slot_level_handler fn, int start_level, int end_level,
-			gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield)
+			gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield,
+			bool flush)
 {
 	struct slot_rmap_walk_iterator iterator;
-	bool flush = false;
 
 	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
 			end_gfn, &iterator) {
@@ -5370,7 +5370,7 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	return slot_handle_level_range(kvm, memslot, fn, start_level,
 			end_level, memslot->base_gfn,
 			memslot->base_gfn + memslot->npages - 1,
-			flush_on_yield);
+			flush_on_yield, false);
 }
 
 static __always_inline bool
@@ -5606,7 +5606,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	int i;
-	bool flush;
+	bool flush = false;
 
 	write_lock(&kvm->mmu_lock);
 	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
@@ -5622,14 +5622,13 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 			flush = slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
 							PG_LEVEL_4K,
 							KVM_MAX_HUGEPAGE_LEVEL,
-							start, end - 1, true);
-
-			if (flush)
-				kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
-								   gfn_end);
+							start, end - 1, true, flush);
 		}
 	}
 
+	if (flush)
+		kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
+
 	if (is_tdp_mmu_enabled(kvm)) {
 		flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end);
 		if (flush)