Commit 7b574863 authored by Sean Christopherson, committed by Paolo Bonzini
Browse files

KVM: x86/mmu: Rename page-track APIs to reflect the new reality



Rename the page-track APIs to capture that they're all about tracking
writes, now that the facade of supporting multiple modes is gone.

Opportunistically replace "slot" with "gfn" in anticipation of removing
the @slot param from the external APIs.

No functional change intended.

Tested-by: Yongwei Ma <yongwei.ma@intel.com>
Link: https://lore.kernel.org/r/20230729013535.1070024-25-seanjc@google.com


Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 338068b5
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -4,10 +4,10 @@

#include <linux/kvm_types.h>

void kvm_slot_page_track_add_page(struct kvm *kvm,
				  struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_slot_page_track_remove_page(struct kvm *kvm,
void kvm_write_track_add_gfn(struct kvm *kvm,
			     struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_write_track_remove_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
				gfn_t gfn);

#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
/*
+4 −4
Original line number Diff line number Diff line
@@ -831,7 +831,7 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)

	/* the non-leaf shadow pages are keeping readonly. */
	if (sp->role.level > PG_LEVEL_4K)
		return kvm_slot_page_track_add_page(kvm, slot, gfn);
		return kvm_write_track_add_gfn(kvm, slot, gfn);

	kvm_mmu_gfn_disallow_lpage(slot, gfn);

@@ -877,7 +877,7 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	if (sp->role.level > PG_LEVEL_4K)
		return kvm_slot_page_track_remove_page(kvm, slot, gfn);
		return kvm_write_track_remove_gfn(kvm, slot, gfn);

	kvm_mmu_gfn_allow_lpage(slot, gfn);
}
@@ -2807,7 +2807,7 @@ int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
	 * track machinery is used to write-protect upper-level shadow pages,
	 * i.e. this guards the role.level == 4K assertion below!
	 */
	if (kvm_slot_page_track_is_active(kvm, slot, gfn))
	if (kvm_gfn_is_write_tracked(kvm, slot, gfn))
		return -EPERM;

	/*
@@ -4201,7 +4201,7 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
	 * guest is writing the page which is write tracked which can
	 * not be fixed by page fault handler.
	 */
	if (kvm_slot_page_track_is_active(vcpu->kvm, fault->slot, fault->gfn))
	if (kvm_gfn_is_write_tracked(vcpu->kvm, fault->slot, fault->gfn))
		return true;

	return false;
+9 −11
Original line number Diff line number Diff line
@@ -84,8 +84,8 @@ static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn,
 * @slot: the @gfn belongs to.
 * @gfn: the guest page.
 */
void kvm_slot_page_track_add_page(struct kvm *kvm,
				  struct kvm_memory_slot *slot, gfn_t gfn)
void kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
			     gfn_t gfn)
{
	if (WARN_ON_ONCE(!kvm_page_track_write_tracking_enabled(kvm)))
		return;
@@ -101,12 +101,11 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
	if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
		kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
EXPORT_SYMBOL_GPL(kvm_write_track_add_gfn);

/*
 * remove the guest page from the tracking pool which stops the interception
 * of corresponding access on that page. It is the opposed operation of
 * kvm_slot_page_track_add_page().
 * of corresponding access on that page.
 *
 * It should be called under the protection both of mmu-lock and kvm->srcu
 * or kvm->slots_lock.
@@ -115,7 +114,7 @@ EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
 * @slot: the @gfn belongs to.
 * @gfn: the guest page.
 */
void kvm_slot_page_track_remove_page(struct kvm *kvm,
void kvm_write_track_remove_gfn(struct kvm *kvm,
				struct kvm_memory_slot *slot, gfn_t gfn)
{
	if (WARN_ON_ONCE(!kvm_page_track_write_tracking_enabled(kvm)))
@@ -129,14 +128,13 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
	 */
	kvm_mmu_gfn_allow_lpage(slot, gfn);
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page);
EXPORT_SYMBOL_GPL(kvm_write_track_remove_gfn);

/*
 * check if the corresponding access on the specified guest page is tracked.
 */
bool kvm_slot_page_track_is_active(struct kvm *kvm,
				   const struct kvm_memory_slot *slot,
				   gfn_t gfn)
bool kvm_gfn_is_write_tracked(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn)
{
	int index;

+2 −2
Original line number Diff line number Diff line
@@ -15,7 +15,7 @@ int kvm_page_track_create_memslot(struct kvm *kvm,
				  struct kvm_memory_slot *slot,
				  unsigned long npages);

bool kvm_slot_page_track_is_active(struct kvm *kvm,
bool kvm_gfn_is_write_tracked(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn);

#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
+2 −2
Original line number Diff line number Diff line
@@ -1564,7 +1564,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
	}

	write_lock(&kvm->mmu_lock);
	kvm_slot_page_track_add_page(kvm, slot, gfn);
	kvm_write_track_add_gfn(kvm, slot, gfn);
	write_unlock(&kvm->mmu_lock);

	srcu_read_unlock(&kvm->srcu, idx);
@@ -1593,7 +1593,7 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
	}

	write_lock(&kvm->mmu_lock);
	kvm_slot_page_track_remove_page(kvm, slot, gfn);
	kvm_write_track_remove_gfn(kvm, slot, gfn);
	write_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, idx);