Commit 54e3f430 authored by Sean Christopherson, committed by Yu Zhang
Browse files

KVM: x86/mmu: Capture 'mmu' in a local variable when allocating roots

mainline inclusion
from mainline-v5.13-rc1
commit b37233c9
category: feature
bugzilla: https://gitee.com/openeuler/intel-kernel/issues/I7S3VQ
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=b37233c911cbecd22a8a2a80137efe706c727d76



----------------------------------------------------------------------

Grab 'mmu' and do s/vcpu->arch.mmu/mmu to shorten line lengths and yield
smaller diffs when moving code around in future cleanup without forcing
the new code to use the same ugly pattern.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210305011101.3597423-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

conflict:
       arch/x86/kvm/mmu/mmu.c

Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
parent 6446cae2
+38 −36
Diff of arch/x86/kvm/mmu/mmu.c (old lines shown above their replacements):
@@ -3243,7 +3243,8 @@ static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,

static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{
	u8 shadow_root_level = vcpu->arch.mmu->shadow_root_level;
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	u8 shadow_root_level = mmu->shadow_root_level;
	hpa_t root;
	unsigned i;

@@ -3252,42 +3253,43 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)

		if (!VALID_PAGE(root))
			return -ENOSPC;
		vcpu->arch.mmu->root_hpa = root;
		mmu->root_hpa = root;
	} else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level,
				      true);

		if (!VALID_PAGE(root))
			return -ENOSPC;
		vcpu->arch.mmu->root_hpa = root;
		mmu->root_hpa = root;
	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
		for (i = 0; i < 4; ++i) {
			MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
			MMU_WARN_ON(VALID_PAGE(mmu->pae_root[i]));

			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
					      i << 30, PT32_ROOT_LEVEL, true);
			if (!VALID_PAGE(root))
				return -ENOSPC;
			vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
			mmu->pae_root[i] = root | PT_PRESENT_MASK;
		}
		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
		mmu->root_hpa = __pa(mmu->pae_root);
	} else
		BUG();

	/* root_pgd is ignored for direct MMUs. */
	vcpu->arch.mmu->root_pgd = 0;
	mmu->root_pgd = 0;

	return 0;
}

static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	u64 pdptr, pm_mask;
	gfn_t root_gfn, root_pgd;
	hpa_t root;
	int i;

	root_pgd = vcpu->arch.mmu->get_guest_pgd(vcpu);
	root_pgd = mmu->get_guest_pgd(vcpu);
	root_gfn = root_pgd >> PAGE_SHIFT;

	if (mmu_check_root(vcpu, root_gfn))
@@ -3297,14 +3299,14 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
	 * Do we shadow a long mode page table? If so we need to
	 * write-protect the guests page table root.
	 */
	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
		MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa));
	if (mmu->root_level >= PT64_ROOT_4LEVEL) {
		MMU_WARN_ON(VALID_PAGE(mmu->root_hpa));

		root = mmu_alloc_root(vcpu, root_gfn, 0,
				      vcpu->arch.mmu->shadow_root_level, false);
				      mmu->shadow_root_level, false);
		if (!VALID_PAGE(root))
			return -ENOSPC;
		vcpu->arch.mmu->root_hpa = root;
		mmu->root_hpa = root;
		goto set_root_pgd;
	}

@@ -3314,7 +3316,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
	 * the shadow page table may be a PAE or a long mode page table.
	 */
	pm_mask = PT_PRESENT_MASK;
	if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;

		/*
@@ -3322,21 +3324,21 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
		 * with 64-bit only when needed.  Unlike 32-bit NPT, it doesn't
		 * need to be in low mem.  See also pml4_root below.
		 */
		if (!vcpu->arch.mmu->pae_root) {
		if (!mmu->pae_root) {
			WARN_ON_ONCE(!tdp_enabled);

			vcpu->arch.mmu->pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
			if (!vcpu->arch.mmu->pae_root)
			mmu->pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
			if (!mmu->pae_root)
				return -ENOMEM;
		}
	}

	for (i = 0; i < 4; ++i) {
		MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
		if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
			pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
		MMU_WARN_ON(VALID_PAGE(mmu->pae_root[i]));
		if (mmu->root_level == PT32E_ROOT_LEVEL) {
			pdptr = mmu->get_pdptr(vcpu, i);
			if (!(pdptr & PT_PRESENT_MASK)) {
				vcpu->arch.mmu->pae_root[i] = 0;
				mmu->pae_root[i] = 0;
				continue;
			}
			root_gfn = pdptr >> PAGE_SHIFT;
@@ -3348,9 +3350,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
				      PT32_ROOT_LEVEL, false);
		if (!VALID_PAGE(root))
			return -ENOSPC;
		vcpu->arch.mmu->pae_root[i] = root | pm_mask;
		mmu->pae_root[i] = root | pm_mask;
	}
	vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
	mmu->root_hpa = __pa(mmu->pae_root);

	/*
	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
@@ -3359,51 +3361,51 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
	 * on demand, as running a 32-bit L1 VMM is very rare.  The PDP is
	 * handled above (to share logic with PAE), deal with the PML4 here.
	 */
	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
		if (vcpu->arch.mmu->pml4_root == NULL) {
	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
		if (mmu->pml4_root == NULL) {
			u64 *pml4_root;

			pml4_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
			if (!pml4_root)
				return -ENOMEM;

			pml4_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
			pml4_root[0] = __pa(mmu->pae_root) | pm_mask;

			vcpu->arch.mmu->pml4_root = pml4_root;
			mmu->pml4_root = pml4_root;
		}

		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pml4_root);
		mmu->root_hpa = __pa(mmu->pml4_root);
	}
#ifdef CONFIG_X86_64
	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_5LEVEL) {
		if (vcpu->arch.mmu->pml4_root == NULL) {
	if (mmu->shadow_root_level == PT64_ROOT_5LEVEL) {
		if (mmu->pml4_root == NULL) {
			u64 *pml4_root;

			pml4_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
			if (!pml4_root)
				return -ENOMEM;

			pml4_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
			pml4_root[0] = __pa(mmu->pae_root) | pm_mask;

			vcpu->arch.mmu->pml4_root = pml4_root;
			mmu->pml4_root = pml4_root;
		}
		if (vcpu->arch.mmu->pml5_root == NULL) {
		if (mmu->pml5_root == NULL) {
			u64 *pml5_root;

			pml5_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
			if (!pml5_root)
				return -ENOMEM;

			pml5_root[0] = __pa(vcpu->arch.mmu->pml4_root) | pm_mask;
			pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;

			vcpu->arch.mmu->pml5_root = pml5_root;
			mmu->pml5_root = pml5_root;
		}
		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pml5_root);
		mmu->root_hpa = __pa(vcpu->arch.mmu->pml5_root);
	}
#endif

set_root_pgd:
	vcpu->arch.mmu->root_pgd = root_pgd;
	mmu->root_pgd = root_pgd;

	return 0;
}