Commit dcdee4d1 authored by Sean Christopherson, committed by Yu Zhang

KVM: x86/mmu: Pass address space ID to TDP MMU root walkers

mainline inclusion
from mainline-v5.13-rc1
commit a3f15bda
category: feature
bugzilla: https://gitee.com/openeuler/intel-kernel/issues/I7S3VQ
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=a3f15bda46e85c33e55b23aa51dd542453f134e3

----------------------------------------------------------------------

Move the address space ID check that is performed when iterating over
roots into the macro helpers to consolidate code.

No functional change intended.
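
To illustrate: the new macro versions append an "if (...) { } else" to the
iterator, so roots whose address space ID does not match fall into the empty
if-arm and are skipped, while matching roots reach the caller's loop body,
whether that body is a single statement or a braced block. Using an empty
if-arm (instead of inverting the condition) also keeps a dangling "else" in
the caller's body from binding to the macro's "if". A minimal standalone C
sketch of the idiom, with hypothetical names (struct item,
for_each_item_with_id) rather than KVM code:

	#include <stdio.h>

	struct item {
		int as_id;
		int value;
	};

	/* Visit each of _n items, silently skipping those whose as_id differs. */
	#define for_each_item_with_id(_items, _n, _i, _as_id)	\
		for ((_i) = 0; (_i) < (_n); (_i)++)		\
			if ((_items)[_i].as_id != (_as_id)) {	\
			} else

	int main(void)
	{
		struct item items[] = {
			{ .as_id = 0, .value = 1 },
			{ .as_id = 1, .value = 2 },
			{ .as_id = 0, .value = 3 },
		};
		int i, sum = 0;

		/* Only the two as_id == 0 entries reach the loop body. */
		for_each_item_with_id(items, 3, i, 0)
			sum += items[i].value;

		printf("sum = %d\n", sum);	/* prints "sum = 4" */
		return 0;
	}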

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210326021957.1424875-7-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

conflicts:
	arch/x86/kvm/mmu/tdp_mmu.c

Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
parent 3bba06de
arch/x86/kvm/mmu/mmu_internal.h  +6 −1
@@ -78,9 +78,14 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
 	return to_shadow_page(__pa(sptep));
 }
 
+static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
+{
+	return role.smm ? 1 : 0;
+}
+
 static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
 {
-	return sp->role.smm ? 1 : 0;
+	return kvm_mmu_role_as_id(sp->role);
 }
 
 static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
arch/x86/kvm/mmu/tdp_mmu.c  +39 −72
@@ -76,14 +76,18 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * if exiting the loop early, the caller must drop the reference to the most
  * recent root. (Unless keeping a live reference is desirable.)
  */
-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)				\
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)		\
 	for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,	\
 				      typeof(*_root), link);		\
 	     tdp_mmu_next_root_valid(_kvm, _root);			\
-	     _root = tdp_mmu_next_root(_kvm, _root))
+	     _root = tdp_mmu_next_root(_kvm, _root))			\
+		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
+		} else
 
-#define for_each_tdp_mmu_root(_kvm, _root)				\
-	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
+#define for_each_tdp_mmu_root(_kvm, _root, _as_id)			\
+	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)	\
+		if (kvm_mmu_page_as_id(_root) != _as_id) {		\
+		} else
 
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 			  gfn_t start, gfn_t end, bool can_yield, bool flush);
@@ -148,7 +152,7 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
 	role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
 
 	/* Check for an existing root before allocating a new one. */
-	for_each_tdp_mmu_root(kvm, root) {
+	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
 		if (root->role.word == role.word) {
 			kvm_mmu_get_root(kvm, root);
 			goto out;
@@ -704,11 +708,8 @@ bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
 {
 	struct kvm_mmu_page *root;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root) {
-		if (kvm_mmu_page_as_id(root) != as_id)
-			continue;
-
+	for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
 		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
-	}
 
 	return flush;
 }
@@ -888,8 +889,8 @@ static __always_inline int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm,
 	int ret = 0;
 	int as_id;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root) {
-		as_id = kvm_mmu_page_as_id(root);
+	for (as_id = 0; as_id < KVM_ADDRESS_SPACE_NUM; as_id++) {
+		for_each_tdp_mmu_root_yield_safe(kvm, root, as_id) {
 			slots = __kvm_memslots(kvm, as_id);
 			kvm_for_each_memslot(memslot, slots) {
 				unsigned long hva_start, hva_end;
@@ -911,6 +912,7 @@ static __always_inline int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm,
 					gfn_end, data);
 			}
 		}
+	}
 
 	return ret;
 }
@@ -1120,17 +1122,11 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
 			     int min_level)
 {
 	struct kvm_mmu_page *root;
-	int root_as_id;
 	bool spte_set = false;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root) {
-		root_as_id = kvm_mmu_page_as_id(root);
-		if (root_as_id != slot->as_id)
-			continue;
-
+	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
 		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
 			     slot->base_gfn + slot->npages, min_level);
-	}
 
 	return spte_set;
 }
@@ -1188,17 +1184,11 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
 	struct kvm_mmu_page *root;
-	int root_as_id;
 	bool spte_set = false;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root) {
-		root_as_id = kvm_mmu_page_as_id(root);
-		if (root_as_id != slot->as_id)
-			continue;
-
+	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
 		spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
 				slot->base_gfn + slot->npages);
-	}
 
 	return spte_set;
 }
@@ -1260,17 +1250,11 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 				       bool wrprot)
 {
 	struct kvm_mmu_page *root;
-	int root_as_id;
 
 	lockdep_assert_held_write(&kvm->mmu_lock);
-	for_each_tdp_mmu_root(kvm, root) {
-		root_as_id = kvm_mmu_page_as_id(root);
-		if (root_as_id != slot->as_id)
-			continue;
-
+	for_each_tdp_mmu_root(kvm, root, slot->as_id)
 		clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
-	}
 }
 
 /*
  * Set the dirty status of all the SPTEs mapping GFNs in the memslot. This is
@@ -1312,17 +1296,11 @@ static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
 	struct kvm_mmu_page *root;
-	int root_as_id;
 	bool spte_set = false;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root) {
-		root_as_id = kvm_mmu_page_as_id(root);
-		if (root_as_id != slot->as_id)
-			continue;
-
+	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
 		spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
 				slot->base_gfn + slot->npages);
-	}
 	return spte_set;
 }
 
@@ -1376,15 +1354,9 @@ bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
 				       struct kvm_memory_slot *slot, bool flush)
 {
 	struct kvm_mmu_page *root;
-	int root_as_id;
 
-	for_each_tdp_mmu_root_yield_safe(kvm, root) {
-		root_as_id = kvm_mmu_page_as_id(root);
-		if (root_as_id != slot->as_id)
-			continue;
-
+	for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id)
 		flush = zap_collapsible_spte_range(kvm, root, slot, flush);
-	}
 
 	return flush;
 }
@@ -1428,17 +1400,12 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
 				   struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	struct kvm_mmu_page *root;
-	int root_as_id;
 	bool spte_set = false;
 
 	lockdep_assert_held_write(&kvm->mmu_lock);
-	for_each_tdp_mmu_root(kvm, root) {
-		root_as_id = kvm_mmu_page_as_id(root);
-		if (root_as_id != slot->as_id)
-			continue;
-
+	for_each_tdp_mmu_root(kvm, root, slot->as_id)
 		spte_set |= write_protect_gfn(kvm, root, gfn);
-	}
 
 	return spte_set;
 }