Commit 0cd8dc73 authored by Paolo Bonzini

KVM: x86/mmu: pull call to drop_large_spte() into __link_shadow_page()



Before allocating a child shadow page table, all callers check
whether the parent already points to a huge page and, if so, they
drop that SPTE.  This is done by drop_large_spte().

However, dropping the large SPTE is really only necessary before the
sp is installed.  While the sp is returned by kvm_mmu_get_child_sp(),
installing it happens later in __link_shadow_page().  Move the call
there instead of having it in each and every caller.

To ensure that the shadow page is not linked twice if it was present,
do _not_ opportunistically make kvm_mmu_get_child_sp() idempotent:
instead, return an error value if the shadow page already existed.
This is a bit more verbose, but clearer than NULL.

Finally, now that the drop_large_spte() name is not taken anymore,
remove the two underscores in front of __drop_large_spte().

Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 20d49186
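
The commit message's preference for an error value over NULL follows the kernel's ERR_PTR idiom from include/linux/err.h, which encodes a small negative errno directly in the pointer so that a single return value can say either "here is your object" or "here is why you did not get one". Below is a minimal, self-contained userspace sketch of that idiom; get_child() and its table are hypothetical stand-ins for kvm_mmu_get_child_sp(), not kernel code.

/* Toy model of the ERR_PTR idiom; build with: cc err_ptr_demo.c */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)	/* as in include/linux/err.h */
{
	return (void *)error;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int table[8];	/* toy "page table": nonzero means present */

/* Install idx if free; report -EEXIST if it is already populated. */
static int *get_child(int idx)
{
	if (table[idx])
		return ERR_PTR(-EEXIST);
	table[idx] = 1;
	return &table[idx];
}

int main(void)
{
	for (int i = 0; i < 2; i++) {
		int *slot = get_child(3);

		if (slot == ERR_PTR(-EEXIST))	/* the callers' new check */
			printf("already present, skip\n");
		else
			printf("installed at slot 3\n");
	}
	return 0;
}

The comparison against one specific ERR_PTR value, rather than a bare IS_ERR() check, is exactly what the callers in the diff below do: -EEXIST is the only error kvm_mmu_get_child_sp() can return, and it means "keep walking", not "fail the fault".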
arch/x86/kvm/mmu/mmu.c  +21 −22
@@ -1135,27 +1135,17 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
 		rmap_remove(kvm, sptep);
 }
 
-
-static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
+static void drop_large_spte(struct kvm *kvm, u64 *sptep)
 {
-	if (is_large_pte(*sptep)) {
-		WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
-		drop_spte(kvm, sptep);
-		return true;
-	}
-
-	return false;
-}
+	struct kvm_mmu_page *sp;
 
-static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
-{
-	if (__drop_large_spte(vcpu->kvm, sptep)) {
-		struct kvm_mmu_page *sp = sptep_to_sp(sptep);
+	sp = sptep_to_sp(sptep);
+	WARN_ON(sp->role.level == PG_LEVEL_4K);
 
-		kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
+	drop_spte(kvm, sptep);
+	kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
 			KVM_PAGES_PER_HPAGE(sp->role.level));
-	}
 }
 
 /*
  * Write-protect on the specified @sptep, @pt_protect indicates whether
@@ -2221,6 +2211,9 @@ static struct kvm_mmu_page *kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu,
 {
 	union kvm_mmu_page_role role;
 
+	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
+		return ERR_PTR(-EEXIST);
+
 	role = kvm_mmu_child_role(sptep, direct, access);
 	return kvm_mmu_get_shadow_page(vcpu, gfn, role);
 }
@@ -2288,13 +2281,21 @@ static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
 	__shadow_walk_next(iterator, *iterator->sptep);
 }
 
-static void __link_shadow_page(struct kvm_mmu_memory_cache *cache, u64 *sptep,
+static void __link_shadow_page(struct kvm *kvm,
+			       struct kvm_mmu_memory_cache *cache, u64 *sptep,
 			       struct kvm_mmu_page *sp)
 {
 	u64 spte;
 
 	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
 
+	/*
+	 * If an SPTE is present already, it must be a leaf and therefore
+	 * a large one.  Drop it and flush the TLB before installing sp.
+	 */
+	if (is_shadow_present_pte(*sptep))
+		drop_large_spte(kvm, sptep);
+
 	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
 
 	mmu_spte_set(sptep, spte);
@@ -2308,7 +2309,7 @@ static void __link_shadow_page(struct kvm_mmu_memory_cache *cache, u64 *sptep,
 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
 			     struct kvm_mmu_page *sp)
 {
-	__link_shadow_page(&vcpu->arch.mmu_pte_list_desc_cache, sptep, sp);
+	__link_shadow_page(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, sptep, sp);
 }
 
 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
@@ -3080,11 +3081,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 		if (it.level == fault->goal_level)
 			break;
 
-		drop_large_spte(vcpu, it.sptep);
-		if (is_shadow_present_pte(*it.sptep))
-			continue;
-
 		sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn, true, ACC_ALL);
+		if (sp == ERR_PTR(-EEXIST))
+			continue;
 
 		link_shadow_page(vcpu, it.sptep, sp);
 		if (fault->is_tdp && fault->huge_page_disallowed &&
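
Taken together, the mmu.c hunks split the old drop_large_spte() work in two: kvm_mmu_get_child_sp() refuses to return a page for an entry that is already a non-leaf, and __link_shadow_page() drops a still-present large leaf (with a TLB flush) immediately before installing the child. The sketch below is a simplified, self-contained model of that division of labor; every name in it (get_child_sp, link, LARGE_LEAF, and so on) is a stand-in rather than the kernel's own type or helper.

#include <errno.h>
#include <stdio.h>

#define ERR_PTR(e)	((void *)(long)(e))

enum { ABSENT, LARGE_LEAF, NONLEAF };	/* possible states of one SPTE */

struct page { int dummy; };

static struct page *get_child_sp(int *sptep)
{
	static struct page child;

	/* Present and already a page table: nothing to allocate. */
	if (*sptep == NONLEAF)
		return ERR_PTR(-EEXIST);
	return &child;
}

static void link(int *sptep, struct page *sp)
{
	/* A present entry here can only be a large leaf: drop it first. */
	if (*sptep == LARGE_LEAF) {
		*sptep = ABSENT;
		printf("dropped large leaf, flushed TLB\n");
	}
	*sptep = NONLEAF;	/* install the child page table sp points to */
}

int main(void)
{
	int spte = LARGE_LEAF;
	struct page *sp = get_child_sp(&spte);

	if (sp != ERR_PTR(-EEXIST))
		link(&spte, sp);

	/* Second walk: the entry is now a non-leaf, so just descend. */
	sp = get_child_sp(&spte);
	if (sp == ERR_PTR(-EEXIST))
		printf("already linked, continue walking\n");
	return 0;
}

Note how the model preserves the invariant stated in the new comment: by the time link() sees a present entry, the -EEXIST check has already filtered out non-leaf entries, so the only thing left to drop is a large leaf.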
arch/x86/kvm/mmu/paging_tmpl.h  +14 −17
@@ -648,15 +648,13 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 		gfn_t table_gfn;
 
 		clear_sp_write_flooding_count(it.sptep);
-		drop_large_spte(vcpu, it.sptep);
 
-		sp = NULL;
-		if (!is_shadow_present_pte(*it.sptep)) {
-			table_gfn = gw->table_gfn[it.level - 2];
-			access = gw->pt_access[it.level - 2];
-			sp = kvm_mmu_get_child_sp(vcpu, it.sptep, table_gfn,
-						  false, access);
+		table_gfn = gw->table_gfn[it.level - 2];
+		access = gw->pt_access[it.level - 2];
+		sp = kvm_mmu_get_child_sp(vcpu, it.sptep, table_gfn,
+					  false, access);
 
+		if (sp != ERR_PTR(-EEXIST)) {
 			/*
 			 * We must synchronize the pagetable before linking it
 			 * because the guest doesn't need to flush tlb when
@@ -685,7 +683,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
 			goto out_gpte_changed;
 
-		if (sp)
+		if (sp != ERR_PTR(-EEXIST))
 			link_shadow_page(vcpu, it.sptep, sp);
 	}
 
@@ -709,17 +707,16 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 
 		validate_direct_spte(vcpu, it.sptep, direct_access);
 
-		drop_large_spte(vcpu, it.sptep);
+		sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn,
+					  true, direct_access);
+		if (sp == ERR_PTR(-EEXIST))
+			continue;
 
-		if (!is_shadow_present_pte(*it.sptep)) {
-			sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn,
-						  true, direct_access);
-			link_shadow_page(vcpu, it.sptep, sp);
-			if (fault->huge_page_disallowed &&
-			    fault->req_level >= it.level)
-				account_huge_nx_page(vcpu->kvm, sp);
-		}
+		link_shadow_page(vcpu, it.sptep, sp);
+		if (fault->huge_page_disallowed &&
+		    fault->req_level >= it.level)
+			account_huge_nx_page(vcpu->kvm, sp);
 	}
 
 	if (WARN_ON_ONCE(it.level != fault->goal_level))
 		return -EFAULT;
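
A final note on why __link_shadow_page()'s drop-then-install ordering matters: the SPTE being replaced is a huge-page leaf, so the stale translation left in remote TLBs covers the entire huge-page range, not just one 4K page. That is what the KVM_PAGES_PER_HPAGE(sp->role.level) argument to the flush in drop_large_spte() accounts for. A back-of-the-envelope sketch of the arithmetic, assuming x86's nine bits of index per page-table level (PAGES_PER_HPAGE below is a local stand-in for the kernel's KVM_PAGES_PER_HPAGE):

#include <stdio.h>

#define PG_LEVEL_4K	1
/* One level = 9 index bits, so each level multiplies coverage by 512. */
#define PAGES_PER_HPAGE(level)	(1UL << (((level) - PG_LEVEL_4K) * 9))

int main(void)
{
	printf("2M huge page (level 2) spans %lu 4K pages\n",
	       PAGES_PER_HPAGE(2));	/* 512 */
	printf("1G huge page (level 3) spans %lu 4K pages\n",
	       PAGES_PER_HPAGE(3));	/* 262144 */
	return 0;
}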