Commit 4d25502a authored by Paolo Bonzini

KVM: x86/mmu: replace root_level with cpu_role.base.level

Remove another duplicate field of struct kvm_mmu.  This time it's
the root level for page table walking; the separate field is
always initialized as cpu_role.base.level, so its users can look
up the CPU mode directly instead.
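
For reference, this is roughly how the two roles nest as of this series; a
simplified sketch with most bitfields elided (not the full kernel
definitions), showing where cpu_role.base.level lives:

  union kvm_mmu_page_role {
  	u32 word;
  	struct {
  		unsigned level:4;	/* root level of the paging structure */
  		/* ... many more role bits, elided ... */
  	};
  };

  union kvm_mmu_extended_role {
  	u32 word;	/* CR0/CR4/EFER-derived bits, elided */
  };

  union kvm_cpu_role {
  	u64 as_u64;
  	struct {
  		union kvm_mmu_page_role base;
  		union kvm_mmu_extended_role ext;
  	};
  };

Since the init functions below always set the removed field from
cpu_role.base.level (or a value derived from it), reading the union
directly is equivalent.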

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a972e29c
arch/x86/include/asm/kvm_host.h +0 −1
@@ -438,7 +438,6 @@ struct kvm_mmu {
 	struct kvm_mmu_root_info root;
 	union kvm_cpu_role cpu_role;
 	union kvm_mmu_page_role root_role;
-	u8 root_level;
 	bool direct_map;
 
 	/*
arch/x86/kvm/mmu/mmu.c +7 −11
@@ -2132,7 +2132,7 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
 	iterator->level = vcpu->arch.mmu->root_role.level;
 
 	if (iterator->level >= PT64_ROOT_4LEVEL &&
-	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
+	    vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL &&
 	    !vcpu->arch.mmu->direct_map)
 		iterator->level = PT32E_ROOT_LEVEL;
 
@@ -3491,7 +3491,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * On SVM, reading PDPTRs might access guest memory, which might fault
 	 * and thus might sleep.  Grab the PDPTRs before acquiring mmu_lock.
 	 */
-	if (mmu->root_level == PT32E_ROOT_LEVEL) {
+	if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
 		for (i = 0; i < 4; ++i) {
 			pdptrs[i] = mmu->get_pdptr(vcpu, i);
 			if (!(pdptrs[i] & PT_PRESENT_MASK))
@@ -3515,7 +3515,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	 * Do we shadow a long mode page table? If so we need to
 	 * write-protect the guests page table root.
 	 */
-	if (mmu->root_level >= PT64_ROOT_4LEVEL) {
+	if (mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
 		root = mmu_alloc_root(vcpu, root_gfn, 0,
 				      mmu->root_role.level, false);
 		mmu->root.hpa = root;
@@ -3554,7 +3554,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
 	for (i = 0; i < 4; ++i) {
 		WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
 
-		if (mmu->root_level == PT32E_ROOT_LEVEL) {
+		if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) {
 			if (!(pdptrs[i] & PT_PRESENT_MASK)) {
 				mmu->pae_root[i] = INVALID_PAE_ROOT;
 				continue;
@@ -3596,7 +3596,7 @@ static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
 	 * equivalent level in the guest's NPT to shadow.  Allocate the tables
 	 * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
 	 */
-	if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
+	if (mmu->direct_map || mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL ||
 	    mmu->root_role.level < PT64_ROOT_4LEVEL)
 		return 0;
 
@@ -3701,7 +3701,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 
 	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
 
-	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
+	if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
 		hpa_t root = vcpu->arch.mmu->root.hpa;
 		sp = to_shadow_page(root);
 
@@ -4417,7 +4417,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 {
 	__reset_rsvds_bits_mask(&context->guest_rsvd_check,
 				vcpu->arch.reserved_gpa_bits,
-				context->root_level, is_efer_nx(context),
+				context->cpu_role.base.level, is_efer_nx(context),
 				guest_can_use_gbpages(vcpu),
 				is_cr4_pse(context),
 				guest_cpuid_is_amd_or_hygon(vcpu));
@@ -4826,7 +4826,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 	context->get_guest_pgd = get_cr3;
 	context->get_pdptr = kvm_pdptr_read;
 	context->inject_page_fault = kvm_inject_page_fault;
-	context->root_level = cpu_role.base.level;
 
 	if (!is_cr0_pg(context))
 		context->gva_to_gpa = nonpaging_gva_to_gpa;
@@ -4856,7 +4855,6 @@ static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *conte
 		paging64_init_context(context);
 	else
 		paging32_init_context(context);
-	context->root_level = cpu_role.base.level;
 
 	reset_guest_paging_metadata(vcpu, context);
 	reset_shadow_zero_bits_mask(vcpu, context);
@@ -4954,7 +4952,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 		context->gva_to_gpa = ept_gva_to_gpa;
 		context->sync_page = ept_sync_page;
 		context->invlpg = ept_invlpg;
-		context->root_level = level;
 		context->direct_map = false;
 		update_permission_bitmask(context, true);
 		context->pkru_mask = 0;
@@ -4990,7 +4987,6 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
 	g_context->get_guest_pgd     = get_cr3;
 	g_context->get_pdptr         = kvm_pdptr_read;
 	g_context->inject_page_fault = kvm_inject_page_fault;
-	g_context->root_level        = new_mode.base.level;
 
 	/*
 	 * L2 page tables are never shadowed, so there is no need to sync
arch/x86/kvm/mmu/paging_tmpl.h +2 −2
@@ -319,7 +319,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 
 	trace_kvm_mmu_pagetable_walk(addr, access);
 retry_walk:
-	walker->level = mmu->root_level;
+	walker->level = mmu->cpu_role.base.level;
 	pte           = mmu->get_guest_pgd(vcpu);
 	have_ad       = PT_HAVE_ACCESSED_DIRTY(mmu);
 
@@ -621,7 +621,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 	WARN_ON_ONCE(gw->gfn != base_gfn);
 	direct_access = gw->pte_access;
 
-	top_level = vcpu->arch.mmu->root_level;
+	top_level = vcpu->arch.mmu->cpu_role.base.level;
 	if (top_level == PT32E_ROOT_LEVEL)
 		top_level = PT32_ROOT_LEVEL;
 	/*
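
The level that FNAME(walk_addr_generic) seeds from cpu_role.base.level is
simply the depth at which the guest page-table walk starts. A hypothetical,
self-contained toy walk illustrates the pattern; toy_walk and read_pte_fn
are made-up names, and unlike the real walker this ignores PAE PDPTEs,
huge pages, permission checks and accessed/dirty updates:

  #include <stdbool.h>
  #include <stdint.h>

  #define PT_PRESENT_MASK	1ULL
  #define PT64_INDEX_BITS	9
  #define PAGE_SHIFT		12

  /* Hypothetical callback that reads one guest PTE from table memory. */
  typedef uint64_t (*read_pte_fn)(uint64_t table_gpa, unsigned int index);

  /*
   * Toy gva->gpa walk seeded from the guest root level, the same way
   * walker->level = mmu->cpu_role.base.level seeds the real walker.
   */
  static bool toy_walk(unsigned int root_level, uint64_t root_gpa,
  		     uint64_t gva, read_pte_fn read_pte, uint64_t *gpa)
  {
  	uint64_t pte = root_gpa;
  	unsigned int level;

  	for (level = root_level; level >= 1; --level) {
  		unsigned int shift = PAGE_SHIFT + (level - 1) * PT64_INDEX_BITS;
  		unsigned int index = (gva >> shift) & 0x1ff;

  		pte = read_pte(pte & ~0xfffULL, index);
  		if (!(pte & PT_PRESENT_MASK))
  			return false;	/* not present: the real code injects #PF */
  	}

  	*gpa = (pte & ~0xfffULL) | (gva & 0xfff);
  	return true;
  }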