Commit f0196529 authored by Wei Huang's avatar Wei Huang Committed by Xie Haocheng
Browse files

KVM: x86: Allow CPU to force vendor-specific TDP level

mainline inclusion
from mainline-v5.15
commit 746700d2
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I6B4YT


CVE: NA

--------------------------------

Future AMD CPUs will require a 5-level NPT if host CR4.LA57 is set.
To prevent kvm_mmu_get_tdp_level() from incorrectly changing the NPT
level on behalf of such CPUs, add a new parameter to kvm_configure_mmu()
that forces a fixed TDP level.

Signed-off-by: default avatarWei Huang <wei.huang2@amd.com>
Message-Id: <20210818165549.3771014-2-wei.huang2@amd.com>
Signed-off-by: default avatarPaolo Bonzini <pbonzini@redhat.com>
Signed-off-by: default avatarXie Haocheng <haocheng.xie@amd.com>
parent f356501a
Loading
Loading
Loading
Loading
+2 −3
Original line number Diff line number Diff line
@@ -662,7 +662,6 @@ struct kvm_vcpu_arch {

	unsigned long cr3_lm_rsvd_bits;
	int maxphyaddr;
	int max_tdp_level;

	/* emulate context */

@@ -1626,8 +1625,8 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
		     bool skip_mmu_sync);

void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
		       int tdp_huge_page_level);
void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
		       int tdp_max_root_level, int tdp_huge_page_level);

static inline u16 kvm_read_ldt(void)
{
+8 −2
Original line number Diff line number Diff line
@@ -96,6 +96,7 @@ module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
bool tdp_enabled = false;

static int max_huge_page_level __read_mostly;
static int tdp_root_level __read_mostly;
static int max_tdp_level __read_mostly;

enum {
@@ -4515,6 +4516,10 @@ static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,

static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
{
	/* tdp_root_level is architecture forced level, use it if nonzero */
	if (tdp_root_level)
		return tdp_root_level;

	/* Use 5-level TDP if and only if it's useful/necessary. */
	if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
		return 4;
@@ -5232,10 +5237,11 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
}
EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva);

void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
		       int tdp_huge_page_level)
void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
		       int tdp_max_root_level, int tdp_huge_page_level)
{
	tdp_enabled = enable_tdp;
	tdp_root_level = tdp_forced_root_level;
	max_tdp_level = tdp_max_root_level;

	/*
+3 −1
Original line number Diff line number Diff line
@@ -989,7 +989,9 @@ static __init int svm_hardware_setup(void)
	if (npt_enabled && !npt)
		npt_enabled = false;

	kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
	/* Force VM NPT level equal to the host's max NPT level */
	kvm_configure_mmu(npt_enabled, get_max_npt_level(),
                         get_max_npt_level(), PG_LEVEL_1G);
	pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");

	if (nrips) {
+2 −1
Original line number Diff line number Diff line
@@ -8361,7 +8361,8 @@ static __init int hardware_setup(void)
		ept_lpage_level = PG_LEVEL_2M;
	else
		ept_lpage_level = PG_LEVEL_4K;
	kvm_configure_mmu(enable_ept, vmx_get_max_tdp_level(), ept_lpage_level);
	kvm_configure_mmu(enable_ept, 0, vmx_get_max_tdp_level(),
			  ept_lpage_level);

	/*
	 * Only enable PML when hardware supports PML feature, and both EPT