Commit 3cc736b3 authored by David Matlack, committed by Paolo Bonzini

KVM: x86/mmu: Pass kvm pointer separately from vcpu to kvm_mmu_find_shadow_page()



Get the kvm pointer from the caller, rather than deriving it from
vcpu->kvm, and plumb the kvm pointer all the way from
kvm_mmu_get_shadow_page(). With this change in place, the vcpu pointer
is only needed to sync indirect shadow pages. In other words,
__kvm_mmu_get_shadow_page() can now be used to get *direct* shadow pages
without a vcpu pointer. This enables eager page splitting, which needs
to allocate direct shadow pages during VM ioctls.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20220516232138.1783324-13-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 336081fb
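
To make the eager-page-splitting motivation concrete, here is a rough sketch, not part of this patch, of how a VM-ioctl path could use the reworked helper to obtain a *direct* shadow page without a vcpu. The helper name and the split_* per-VM caches are hypothetical placeholders; only the __kvm_mmu_get_shadow_page() signature and the shadow_page_caches initializer pattern come from this series.

/*
 * Hypothetical sketch: obtain a direct shadow page from a VM ioctl
 * (e.g. eager page splitting) where no vcpu is available.  The
 * kvm->arch.split_* caches are assumed to be added by a later patch;
 * they are not defined by this commit.
 */
static struct kvm_mmu_page *get_direct_sp_without_vcpu(struct kvm *kvm,
							gfn_t gfn,
							union kvm_mmu_page_role role)
{
	struct shadow_page_caches caches = {
		.page_header_cache = &kvm->arch.split_page_header_cache,
		.shadow_page_cache = &kvm->arch.split_shadow_page_cache,
		.gfn_array_cache   = &kvm->arch.split_gfn_array_cache,
	};

	/*
	 * Direct shadow pages are never unsync, so kvm_mmu_find_shadow_page()
	 * never reaches its vcpu-only sync path, which is what makes passing
	 * a NULL vcpu possible after this refactor.
	 */
	WARN_ON_ONCE(!role.direct);
	return __kvm_mmu_get_shadow_page(kvm, NULL, &caches, gfn, role);
}

In this patch itself, the only caller is still kvm_mmu_get_shadow_page(), which simply forwards vcpu->kvm, as the last hunk below shows.
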
arch/x86/kvm/mmu/mmu.c: +15 −13
@@ -1975,7 +1975,8 @@ static void clear_sp_write_flooding_count(u64 *spte)
	__clear_sp_write_flooding_count(sptep_to_sp(spte));
}

-static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm_vcpu *vcpu,
+static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm,
+						     struct kvm_vcpu *vcpu,
						     gfn_t gfn,
						     struct hlist_head *sp_list,
						     union kvm_mmu_page_role role)
@@ -1985,7 +1986,7 @@ static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm_vcpu *vcpu,
	int collisions = 0;
	LIST_HEAD(invalid_list);

-	for_each_valid_sp(vcpu->kvm, sp, sp_list) {
+	for_each_valid_sp(kvm, sp, sp_list) {
		if (sp->gfn != gfn) {
			collisions++;
			continue;
@@ -2002,7 +2003,7 @@ static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm_vcpu *vcpu,
			 * upper-level page will be write-protected.
			 */
			if (role.level > PG_LEVEL_4K && sp->unsync)
-				kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
+				kvm_mmu_prepare_zap_page(kvm, sp,
							 &invalid_list);
			continue;
		}
@@ -2030,7 +2031,7 @@ static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm_vcpu *vcpu,

			WARN_ON(!list_empty(&invalid_list));
			if (ret > 0)
-				kvm_flush_remote_tlbs(vcpu->kvm);
+				kvm_flush_remote_tlbs(kvm);
		}

		__clear_sp_write_flooding_count(sp);
@@ -2039,13 +2040,13 @@ static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm_vcpu *vcpu,
	}

	sp = NULL;
-	++vcpu->kvm->stat.mmu_cache_miss;
+	++kvm->stat.mmu_cache_miss;

out:
-	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+	kvm_mmu_commit_zap_page(kvm, &invalid_list);

-	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
-		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
+	if (collisions > kvm->stat.max_mmu_page_hash_collisions)
+		kvm->stat.max_mmu_page_hash_collisions = collisions;
	return sp;
}

@@ -2089,7 +2090,8 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
	return sp;
}

-static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
+static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm,
+						      struct kvm_vcpu *vcpu,
						      struct shadow_page_caches *caches,
						      gfn_t gfn,
						      union kvm_mmu_page_role role)
@@ -2098,12 +2100,12 @@ static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
	struct kvm_mmu_page *sp;
	bool created = false;

-	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
+	sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];

-	sp = kvm_mmu_find_shadow_page(vcpu, gfn, sp_list, role);
+	sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role);
	if (!sp) {
		created = true;
-		sp = kvm_mmu_alloc_shadow_page(vcpu->kvm, caches, gfn, sp_list, role);
+		sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role);
	}

	trace_kvm_mmu_get_page(sp, created);
@@ -2120,7 +2122,7 @@ static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
		.gfn_array_cache = &vcpu->arch.mmu_gfn_array_cache,
	};

-	return __kvm_mmu_get_shadow_page(vcpu, &caches, gfn, role);
+	return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role);
}

static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct, unsigned int access)