Commit 94c81364 authored by David Matlack, committed by Paolo Bonzini

KVM: x86/mmu: Decompose kvm_mmu_get_page() into separate functions



Decompose kvm_mmu_get_page() into separate helper functions to increase
readability and prepare for allocating shadow pages without a vcpu
pointer.

Specifically, pull the guts of kvm_mmu_get_page() into 2 helper
functions:

kvm_mmu_find_shadow_page() -
  Walks the page hash checking for any existing mmu pages that match the
  given gfn and role.

kvm_mmu_alloc_shadow_page() -
  Allocates and initializes an entirely new kvm_mmu_page. This currently
  requires a vcpu pointer for allocation and looking up the memslot, but
  that will be removed in a future commit.

No functional change intended.
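
With the split, kvm_mmu_get_page() itself becomes a thin lookup-or-allocate
wrapper around the two helpers. Condensed from the diff below, with comments
added here for orientation:

  static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn,
  					     union kvm_mmu_page_role role)
  {
  	struct hlist_head *sp_list;
  	struct kvm_mmu_page *sp;
  	bool created = false;

  	/* Both helpers operate on the same page hash bucket for @gfn. */
  	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];

  	/* Reuse an existing shadow page that matches @gfn and @role... */
  	sp = kvm_mmu_find_shadow_page(vcpu, gfn, sp_list, role);
  	if (!sp) {
  		/* ...otherwise allocate and initialize a new one. */
  		created = true;
  		sp = kvm_mmu_alloc_shadow_page(vcpu, gfn, sp_list, role);
  	}

  	trace_kvm_mmu_get_page(sp, created);
  	return sp;
  }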

Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20220516232138.1783324-7-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7f497775
+39 −13
@@ -1993,16 +1993,16 @@ static void clear_sp_write_flooding_count(u64 *spte)
 	__clear_sp_write_flooding_count(sptep_to_sp(spte));
 }
 
-static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn,
-					     union kvm_mmu_page_role role)
+static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm_vcpu *vcpu,
+						     gfn_t gfn,
+						     struct hlist_head *sp_list,
+						     union kvm_mmu_page_role role)
 {
-	struct hlist_head *sp_list;
 	struct kvm_mmu_page *sp;
 	int ret;
 	int collisions = 0;
 	LIST_HEAD(invalid_list);
 
-	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
 	for_each_valid_sp(vcpu->kvm, sp, sp_list) {
 		if (sp->gfn != gfn) {
 			collisions++;
@@ -2027,7 +2027,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn,
 
 		/* unsync and write-flooding only apply to indirect SPs. */
 		if (sp->role.direct)
-			goto trace_get_page;
+			goto out;
 
 		if (sp->unsync) {
 			/*
@@ -2053,14 +2053,26 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn,
 
 		__clear_sp_write_flooding_count(sp);
 
-trace_get_page:
-		trace_kvm_mmu_get_page(sp, false);
 		goto out;
 	}
 
+	sp = NULL;
 	++vcpu->kvm->stat.mmu_cache_miss;
 
-	sp = kvm_mmu_alloc_page(vcpu, role.direct);
+out:
+	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+
+	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
+		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
+	return sp;
+}
+
+static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm_vcpu *vcpu,
+						      gfn_t gfn,
+						      struct hlist_head *sp_list,
+						      union kvm_mmu_page_role role)
+{
+	struct kvm_mmu_page *sp = kvm_mmu_alloc_page(vcpu, role.direct);
 
 	sp->gfn = gfn;
 	sp->role = role;
@@ -2070,12 +2082,26 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn,
 		if (role.level == PG_LEVEL_4K && kvm_vcpu_write_protect_gfn(vcpu, gfn))
 			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
 	}
-	trace_kvm_mmu_get_page(sp, true);
-out:
-	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 
-	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
-		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
+	return sp;
+}
+
+static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn,
+					     union kvm_mmu_page_role role)
+{
+	struct hlist_head *sp_list;
+	struct kvm_mmu_page *sp;
+	bool created = false;
+
+	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
+
+	sp = kvm_mmu_find_shadow_page(vcpu, gfn, sp_list, role);
+	if (!sp) {
+		created = true;
+		sp = kvm_mmu_alloc_shadow_page(vcpu, gfn, sp_list, role);
+	}
+
+	trace_kvm_mmu_get_page(sp, created);
 	return sp;
 }