Commit 8c63e8c2 authored by David Matlack, committed by Sean Christopherson
Browse files

KVM: x86/mmu: Rename kvm_flush_remote_tlbs_with_address()



Rename kvm_flush_remote_tlbs_with_address() to
kvm_flush_remote_tlbs_range(). This name is shorter, which reduces the
number of callsites that need to be broken up across multiple lines, and
more readable since it conveys a range of memory is being flushed rather
than a single address.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Link: https://lore.kernel.org/r/20230126184025.2294823-5-dmatlack@google.com


Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 28e4b459
Loading
Loading
Loading
Loading
+5 −9
Original line number Diff line number Diff line
@@ -261,8 +261,7 @@ static inline bool kvm_available_flush_tlb_with_range(void)
	return kvm_x86_ops.tlb_remote_flush_with_range;
}

void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
		u64 start_gfn, u64 pages)
void kvm_flush_remote_tlbs_range(struct kvm *kvm, u64 start_gfn, u64 pages)
{
	struct kvm_tlb_range range;
	int ret = -EOPNOTSUPP;
@@ -5922,8 +5921,7 @@ slot_handle_level_range(struct kvm *kvm, const struct kvm_memory_slot *memslot,

		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
			if (flush && flush_on_yield) {
				kvm_flush_remote_tlbs_with_address(kvm,
						start_gfn,
				kvm_flush_remote_tlbs_range(kvm, start_gfn,
							    iterator.gfn - start_gfn + 1);
				flush = false;
			}
@@ -6279,8 +6277,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
	}

	if (flush)
		kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
						   gfn_end - gfn_start);
		kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);

	kvm_mmu_invalidate_end(kvm, 0, -1ul);

@@ -6669,8 +6666,7 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
	 * is observed by any other operation on the same memslot.
	 */
	lockdep_assert_held(&kvm->slots_lock);
	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
					   memslot->npages);
	kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
}

void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
+3 −4
Original line number Diff line number Diff line
@@ -170,13 +170,12 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level);

void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
					u64 start_gfn, u64 pages);
void kvm_flush_remote_tlbs_range(struct kvm *kvm, u64 start_gfn, u64 pages);

/* Flush the given page (huge or not) of guest memory. */
static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
{
	kvm_flush_remote_tlbs_with_address(kvm, gfn_round_for_level(gfn, level),
	kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level),
				    KVM_PAGES_PER_HPAGE(level));
}