Commit 2a203305 authored by Sean Christopherson, committed by Yu Zhang

KVM: x86/mmu: Allocate the lm_root before allocating PAE roots

mainline inclusion
from mainline-v5.13-rc1
commit ba0a194f
category: feature
bugzilla: https://gitee.com/openeuler/intel-kernel/issues/I7S3VQ
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=ba0a194ffbfb4168a277fb2116e8362013e2078f

----------------------------------------------------------------------

Allocate lm_root before the PAE roots so that the PAE roots aren't
leaked if the memory allocation for the lm_root happens to fail.

Note, KVM can still leak PAE roots if mmu_check_root() fails on a guest's
PDPTR, or if mmu_alloc_root() fails due to MMU pages not being available.
Those issues will be fixed in future commits.
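
The shape of the fix is the classic allocate-then-commit pattern: perform
every allocation first, publish the pointers only once nothing else can
fail, and unwind the earlier allocation when a later one fails. Below is a
minimal userspace sketch of that pattern, not the kernel code itself: a
hypothetical struct and calloc() stand in for kvm_mmu and
get_zeroed_page().

#include <stdlib.h>

/* Hypothetical stand-in for the MMU's two on-demand root tables. */
struct roots {
	unsigned long long *pae_root;
	unsigned long long *pml4_root;
};

/*
 * Allocate both tables before publishing either one, mirroring the
 * ordering the patch introduces: if the second allocation fails, the
 * first page is freed instead of being leaked.
 */
static int alloc_special_roots(struct roots *r)
{
	unsigned long long *pae_root, *pml4_root;

	if (r->pae_root && r->pml4_root)
		return 0;	/* already set up */

	pae_root = calloc(512, sizeof(*pae_root));
	if (!pae_root)
		return -1;

	pml4_root = calloc(512, sizeof(*pml4_root));
	if (!pml4_root) {
		free(pae_root);	/* the leak this patch plugs */
		return -1;
	}

	/* Commit only after every allocation has succeeded. */
	r->pae_root = pae_root;
	r->pml4_root = pml4_root;
	return 0;
}

The kernel version additionally keys the whole block off
shadow_root_level, but the unwind logic is the same.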

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210305011101.3597423-5-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

conflict:
       arch/x86/kvm/mmu/mmu.c

Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
parent 54e3f430
arch/x86/kvm/mmu/mmu.c  +41 −55

--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3316,21 +3316,37 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * the shadow page table may be a PAE or a long mode page table.
 	 */
 	pm_mask = PT_PRESENT_MASK;
-	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
+	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL)
 		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
 
-		/*
-		 * Allocate the page for the PDPTEs when shadowing 32-bit NPT
-		 * with 64-bit only when needed.  Unlike 32-bit NPT, it doesn't
-		 * need to be in low mem.  See also pml4_root below.
-		 */
-		if (!mmu->pae_root) {
-			WARN_ON_ONCE(!tdp_enabled);
-
-			mmu->pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
-			if (!mmu->pae_root)
-				return -ENOMEM;
-		}
-	}
+	/*
+	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
+	 * tables are allocated and initialized at root creation as there is no
+	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
+	 * on demand, as running a 32-bit L1 VMM is very rare.  Unlike 32-bit
+	 * NPT, the PDP table doesn't need to be in low mem.  Preallocate the
+	 * pages so that the PAE roots aren't leaked on failure.
+	 */
+	if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
+	    (!mmu->pae_root)) {
+		u64 *pml4_root, *pae_root;
+
+		pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+		if (!pae_root)
+			return -ENOMEM;
+
+		if (!mmu->pml4_root) {
+			pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+			if (!pml4_root) {
+				free_page((unsigned long)pae_root);
+				return -ENOMEM;
+			}
+
+			mmu->pae_root = pae_root;
+			mmu->pml4_root = pml4_root;
+
+			pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
+		}
+	}
 
 	for (i = 0; i < 4; ++i) {
@@ -3352,44 +3368,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 			return -ENOSPC;
 		mmu->pae_root[i] = root | pm_mask;
 	}
-	mmu->root_hpa = __pa(mmu->pae_root);
 
-	/*
-	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
-	 * tables are allocated and initialized at MMU creation as there is no
-	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
-	 * on demand, as running a 32-bit L1 VMM is very rare.  The PDP is
-	 * handled above (to share logic with PAE), deal with the PML4 here.
-	 */
-	if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
-		if (mmu->pml4_root == NULL) {
-			u64 *pml4_root;
-
-			pml4_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
-			if (!pml4_root)
-				return -ENOMEM;
-
-			pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
-
-			mmu->pml4_root = pml4_root;
-		}
-
-		mmu->root_hpa = __pa(mmu->pml4_root);
-	}
 #ifdef CONFIG_X86_64
-	if (mmu->shadow_root_level == PT64_ROOT_5LEVEL) {
-		if (mmu->pml4_root == NULL) {
-			u64 *pml4_root;
-
-			pml4_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
-			if (!pml4_root)
-				return -ENOMEM;
-
-			pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
-
-			mmu->pml4_root = pml4_root;
-		}
-		if (mmu->pml5_root == NULL) {
+	if (mmu->shadow_root_level == PT64_ROOT_5LEVEL &&
+	    mmu->pml5_root == NULL) {
 		u64 *pml5_root;
 
 		pml5_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
@@ -3397,13 +3379,17 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 		if (!pml5_root)
 			return -ENOMEM;
 
 		pml5_root[0] = __pa(mmu->pml4_root) | pm_mask;
 
 		mmu->pml5_root = pml5_root;
 	}
-		mmu->root_hpa = __pa(vcpu->arch.mmu->pml5_root);
-	}
 #endif
 
+	if (mmu->shadow_root_level == PT64_ROOT_5LEVEL)
+		mmu->root_hpa = __pa(mmu->pml5_root);
+	else if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
+		mmu->root_hpa = __pa(mmu->pml4_root);
+	else
+		mmu->root_hpa = __pa(mmu->pae_root);
+
 set_root_pgd:
 	mmu->root_pgd = root_pgd;
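
With the hunks above applied, root_hpa is chosen in exactly one place,
after every table is known to exist. Here is a standalone restatement of
that final selection chain; the level constants mirror the kernel's
3/4/5-level paging values but are defined locally for illustration.

/* Local mirrors of the kernel's paging-level constants. */
enum { PT32E_ROOT_LEVEL = 3, PT64_ROOT_4LEVEL = 4, PT64_ROOT_5LEVEL = 5 };

/* Pick the deepest root table that the configured shadow level uses. */
static unsigned long pick_root_hpa(int shadow_root_level,
				   unsigned long pml5_pa,
				   unsigned long pml4_pa,
				   unsigned long pae_pa)
{
	if (shadow_root_level == PT64_ROOT_5LEVEL)
		return pml5_pa;		/* PML5 sits on top of PML4 */
	if (shadow_root_level == PT64_ROOT_4LEVEL)
		return pml4_pa;		/* PML4 wraps the PAE root */
	return pae_pa;			/* PAE root is used directly */
}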