Commit e139a34e authored by Ben Gardon, committed by Paolo Bonzini

KVM: x86/mmu: Merge flush and non-flush tdp_mmu_iter_cond_resched



The flushing and non-flushing variants of tdp_mmu_iter_cond_resched have
almost identical implementations. Merge the two functions and add a
flush parameter.

Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20210202185734.1680553-12-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 8d1a182e
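
To see the shape of the refactoring at a glance, here is a minimal, self-contained userspace C sketch of the same pattern: two near-identical helpers collapsed into one that takes a bool flush argument. All names and stub bodies below are illustrative placeholders standing in for the kernel's kvm_flush_remote_tlbs(), cond_resched_lock() and tdp_iter_refresh_walk(); this is not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

/* Placeholder stand-ins for the kernel primitives; illustrative only. */
static void flush_remote_tlbs(void)    { puts("flush remote TLBs"); }
static void yield_and_reset_walk(void) { puts("yield, reset iterator walk"); }
static bool should_yield(void)         { return true; }

/*
 * Merged helper: the old flushing variant becomes iter_cond_resched(true),
 * the old non-flushing variant becomes iter_cond_resched(false).
 */
static bool iter_cond_resched(bool flush)
{
	if (should_yield()) {
		if (flush)
			flush_remote_tlbs();
		yield_and_reset_walk();
		return true;
	}
	return false;
}

int main(void)
{
	iter_cond_resched(true);   /* e.g. zap paths, which must flush */
	iter_cond_resched(false);  /* e.g. write-protect paths */
	return 0;
}

Routing both cases through one helper keeps the flush-before-yield ordering in a single place, so every caller must state explicitly whether dropping the lock requires a remote TLB flush first.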
arch/x86/kvm/mmu/tdp_mmu.c  +13 −29
@@ -443,33 +443,13 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
 	for_each_tdp_pte(_iter, __va(_mmu->root_hpa),		\
 			 _mmu->shadow_root_level, _start, _end)
 
-/*
- * Flush the TLB and yield if the MMU lock is contended or this thread needs to
- * return control to the scheduler.
- *
- * If this function yields, it will also reset the tdp_iter's walk over the
- * paging structure and the calling function should allow the iterator to
- * continue its traversal from the paging structure root.
- *
- * Return true if this function yielded, the TLBs were flushed, and the
- * iterator's traversal was reset. Return false if a yield was not needed.
- */
-static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
-{
-	if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-		kvm_flush_remote_tlbs(kvm);
-		cond_resched_lock(&kvm->mmu_lock);
-		tdp_iter_refresh_walk(iter);
-		return true;
-	}
-
-	return false;
-}
-
 /*
  * Yield if the MMU lock is contended or this thread needs to return control
  * to the scheduler.
  *
+ * If this function should yield and flush is set, it will perform a remote
+ * TLB flush before yielding.
+ *
  * If this function yields, it will also reset the tdp_iter's walk over the
  * paging structure and the calling function should allow the iterator to
  * continue its traversal from the paging structure root.
@@ -477,9 +457,13 @@ static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *it
  * Return true if this function yielded and the iterator's traversal was reset.
  * Return false if a yield was not needed.
  */
-static bool tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
+static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
+					     struct tdp_iter *iter, bool flush)
 {
 	if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+		if (flush)
+			kvm_flush_remote_tlbs(kvm);
+
 		cond_resched_lock(&kvm->mmu_lock);
 		tdp_iter_refresh_walk(iter);
 		return true;
@@ -522,7 +506,7 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 		tdp_mmu_set_spte(kvm, &iter, 0);
 
 		flush_needed = !can_yield ||
-			       !tdp_mmu_iter_flush_cond_resched(kvm, &iter);
+			       !tdp_mmu_iter_cond_resched(kvm, &iter, true);
 	}
 	return flush_needed;
 }
@@ -894,7 +878,7 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
 		spte_set = true;
 
-		tdp_mmu_iter_cond_resched(kvm, &iter);
+		tdp_mmu_iter_cond_resched(kvm, &iter, false);
 	}
 	return spte_set;
 }
@@ -953,7 +937,7 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 		tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
 		spte_set = true;
 
-		tdp_mmu_iter_cond_resched(kvm, &iter);
+		tdp_mmu_iter_cond_resched(kvm, &iter, false);
 	}
 	return spte_set;
 }
@@ -1069,7 +1053,7 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 		tdp_mmu_set_spte(kvm, &iter, new_spte);
 		spte_set = true;
 
-		tdp_mmu_iter_cond_resched(kvm, &iter);
+		tdp_mmu_iter_cond_resched(kvm, &iter, false);
 	}
 
 	return spte_set;
@@ -1121,7 +1105,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 
 		tdp_mmu_set_spte(kvm, &iter, 0);
 
-		spte_set = !tdp_mmu_iter_flush_cond_resched(kvm, &iter);
+		spte_set = !tdp_mmu_iter_cond_resched(kvm, &iter, true);
 	}
 
 	if (spte_set)