Commit 68be1306 authored by David Matlack, committed by Paolo Bonzini

KVM: x86/mmu: Fold rmap_recycle into rmap_add

Consolidate rmap_recycle and rmap_add into a single function since they
are only ever called together (and only from one place). This has a nice
side effect of eliminating an extra kvm_vcpu_gfn_to_memslot(). In
addition, it makes mmu_set_spte(), which is a very long function, a
little shorter.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20210813203504.2742757-3-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b1a429fb
+14 −26
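In caller terms, the consolidation changes mmu_set_spte() from two rmap calls,
each of which resolved the memslot via kvm_vcpu_gfn_to_memslot(), to one. A
simplified before/after sketch, distilled from the diff below:

	/* Before: add the rmap, then recycle if the pte list grew too long. */
	rmap_count = rmap_add(vcpu, sptep, gfn);
	if (rmap_count > RMAP_RECYCLE_THRESHOLD)
		rmap_recycle(vcpu, sptep, gfn);

	/* After: rmap_add() performs the threshold check and recycle itself. */
	rmap_add(vcpu, sptep, gfn);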
@@ -1071,20 +1071,6 @@ static bool rmap_can_add(struct kvm_vcpu *vcpu)
 	return kvm_mmu_memory_cache_nr_free_objects(mc);
 }
 
-static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
-{
-	struct kvm_memory_slot *slot;
-	struct kvm_mmu_page *sp;
-	struct kvm_rmap_head *rmap_head;
-
-	sp = sptep_to_sp(spte);
-	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
-	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
-	return pte_list_add(vcpu, spte, rmap_head);
-}
-
-
 static void rmap_remove(struct kvm *kvm, u64 *spte)
 {
 	struct kvm_memslots *slots;
@@ -1097,9 +1083,9 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
 
 	/*
-	 * Unlike rmap_add and rmap_recycle, rmap_remove does not run in the
-	 * context of a vCPU so have to determine which memslots to use based
-	 * on context information in sp->role.
+	 * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
+	 * so we have to determine which memslots to use based on context
+	 * information in sp->role.
 	 */
 	slots = kvm_memslots_for_spte_role(kvm, sp->role);
 
@@ -1639,19 +1625,24 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
 
 #define RMAP_RECYCLE_THRESHOLD 1000
 
-static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
+static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
-	struct kvm_rmap_head *rmap_head;
 	struct kvm_mmu_page *sp;
+	struct kvm_rmap_head *rmap_head;
+	int rmap_count;
 
 	sp = sptep_to_sp(spte);
+	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 	rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
+	rmap_count = pte_list_add(vcpu, spte, rmap_head);
 
-	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
-	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
-			KVM_PAGES_PER_HPAGE(sp->role.level));
+	if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
+		kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
+		kvm_flush_remote_tlbs_with_address(
+				vcpu->kvm, sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
+	}
 }
 
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -2713,7 +2704,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 			bool host_writable)
 {
 	int was_rmapped = 0;
-	int rmap_count;
 	int set_spte_ret;
 	int ret = RET_PF_FIXED;
 	bool flush = false;
@@ -2772,9 +2762,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
 	if (!was_rmapped) {
 		kvm_update_page_stats(vcpu->kvm, level, 1);
-		rmap_count = rmap_add(vcpu, sptep, gfn);
-		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
-			rmap_recycle(vcpu, sptep, gfn);
+		rmap_add(vcpu, sptep, gfn);
 	}
 
 	return ret;