Commit a9442f59 authored by Ben Gardon's avatar Ben Gardon Committed by Paolo Bonzini
Browse files

KVM: x86/mmu: Factor out functions to add/remove TDP MMU pages



Move the work of adding and removing TDP MMU pages to/from "secondary"
data structures to helper functions. These functions will be built on in
future commits to enable MMU operations to proceed (mostly) in parallel.

No functional change expected.

Signed-off-by: default avatarBen Gardon <bgardon@google.com>
Message-Id: <20210202185734.1680553-20-bgardon@google.com>
Signed-off-by: default avatarPaolo Bonzini <pbonzini@redhat.com>
parent 531810ca
Loading
Loading
Loading
Loading
+39 −8
Original line number Diff line number Diff line
@@ -262,6 +262,39 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
	}
}

/**
 * tdp_mmu_link_page - Insert a freshly allocated page into the TDP MMU's
 *		       page tracking structures
 *
 * @kvm: kvm instance
 * @sp: the new page
 * @account_nx: This page replaces a NX large page and should be marked for
 *		eventual reclaim.
 *
 * Callers must hold the MMU lock for write; the lockdep assertion below
 * enforces that.
 */
static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
			      bool account_nx)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	/*
	 * Both updates touch independent structures and are serialized by
	 * the write-held mmu_lock.
	 */
	if (account_nx)
		account_huge_nx_page(kvm, sp);
	list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
}

/**
 * tdp_mmu_unlink_page - Drop a page from the TDP MMU's page tracking
 *			 structures
 *
 * @kvm: kvm instance
 * @sp: the page to be removed
 *
 * Undoes tdp_mmu_link_page(): removes @sp from the TDP MMU page list and,
 * if the page was accounted as a disallowed NX large page, unaccounts it.
 * Callers must hold the MMU lock for write.
 */
static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);
	list_del(&sp->link);
}

/**
 * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
 *
@@ -281,10 +314,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt)

	trace_kvm_mmu_prepare_zap_page(sp);

	list_del(&sp->link);

	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);
	tdp_mmu_unlink_page(kvm, sp);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		old_child_spte = READ_ONCE(*(pt + i));
@@ -705,15 +735,16 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,

		if (!is_shadow_present_pte(iter.old_spte)) {
			sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
			list_add(&sp->link, &vcpu->kvm->arch.tdp_mmu_pages);
			child_pt = sp->spt;

			tdp_mmu_link_page(vcpu->kvm, sp,
					  huge_page_disallowed &&
					  req_level >= iter.level);

			new_spte = make_nonleaf_spte(child_pt,
						     !shadow_accessed_mask);

			trace_kvm_mmu_get_page(sp, true);
			if (huge_page_disallowed && req_level >= iter.level)
				account_huge_nx_page(vcpu->kvm, sp);

			tdp_mmu_set_spte(vcpu->kvm, &iter, new_spte);
		}
	}