Commit 7a7ae829 authored by Paolo Bonzini

KVM: x86/mmu: rename kvm_mmu_role union

It is quite confusing that the "full" union is called kvm_mmu_role
but is used for the "cpu_role" field of struct kvm_mmu.  Rename it
to kvm_cpu_role.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7a458f0e
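
For reference, a condensed sketch of how the renamed union nests the other
role types (field lists trimmed to what this commit touches; the real
definitions are in the arch/x86/include/asm/kvm_host.h hunk below):

	union kvm_mmu_page_role {
		u32 word;
		/* ... bitfields describing one shadow page ... */
	};

	union kvm_mmu_extended_role {
		u32 word;
		/* ... bitfields relevant only to the CPU mode ... */
	};

	/* Formerly "union kvm_mmu_role"; renamed to match its one user. */
	union kvm_cpu_role {
		u64 as_u64;
		struct {
			union kvm_mmu_page_role base;
			union kvm_mmu_extended_role ext;
		};
	};

	struct kvm_mmu {
		/* ... */
		union kvm_cpu_role cpu_role;	/* type now matches the field name */
		union kvm_mmu_page_role root_role;
		/* ... */
	};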
arch/x86/include/asm/kvm_host.h +3 −3
@@ -281,7 +281,7 @@ struct kvm_kernel_irq_routing_entry;
/*
 * kvm_mmu_page_role tracks the properties of a shadow page (where shadow page
 * also includes TDP pages) to determine whether or not a page can be used in
- * the given MMU context.  This is a subset of the overall kvm_mmu_role to
+ * the given MMU context.  This is a subset of the overall kvm_cpu_role to
 * minimize the size of kvm_memory_slot.arch.gfn_track, i.e. allows allocating
 * 2 bytes per gfn instead of 4 bytes per gfn.
 *
@@ -378,7 +378,7 @@ union kvm_mmu_extended_role {
	};
};

-union kvm_mmu_role {
+union kvm_cpu_role {
	u64 as_u64;
	struct {
		union kvm_mmu_page_role base;
@@ -438,7 +438,7 @@ struct kvm_mmu {
			 struct kvm_mmu_page *sp);
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
	struct kvm_mmu_root_info root;
-	union kvm_mmu_role cpu_role;
+	union kvm_cpu_role cpu_role;
	union kvm_mmu_page_role root_role;
	u8 root_level;
	u8 shadow_root_level;
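
The mmu.c changes below update every function that computes or consumes the
union. The recurring pattern, condensed here as a sketch that mirrors
init_kvm_tdp_mmu() and kvm_init_shadow_mmu() in the hunks that follow:

	static void init_mmu_sketch(struct kvm_vcpu *vcpu,
				    const struct kvm_mmu_role_regs *regs)
	{
		struct kvm_mmu *context = &vcpu->arch.root_mmu;
		/* Full 64-bit CPU role from the vCPU's register state. */
		union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
		/* 32-bit page role for the root page, derived from it. */
		union kvm_mmu_page_role root_role =
			kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_role);

		/* Nothing changed: reuse the current MMU context as-is. */
		if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
		    root_role.word == context->root_role.word)
			return;

		context->cpu_role.as_u64 = cpu_role.as_u64;
		context->root_role.word = root_role.word;
		/* ... refresh paging callbacks and levels ... */
	}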
arch/x86/kvm/mmu/mmu.c +14 −14
@@ -4726,10 +4726,10 @@ static void paging32_init_context(struct kvm_mmu *context)
	context->direct_map = false;
}

-static union kvm_mmu_role
+static union kvm_cpu_role
kvm_calc_cpu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *regs)
{
-	union kvm_mmu_role role = {0};
+	union kvm_cpu_role role = {0};

	role.base.access = ACC_ALL;
	role.base.smm = is_smm(vcpu);
@@ -4783,7 +4783,7 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)

static union kvm_mmu_page_role
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
-				union kvm_mmu_role cpu_role)
+				union kvm_cpu_role cpu_role)
{
	union kvm_mmu_page_role role = {0};

@@ -4804,7 +4804,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
			     const struct kvm_mmu_role_regs *regs)
{
	struct kvm_mmu *context = &vcpu->arch.root_mmu;
-	union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
+	union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
	union kvm_mmu_page_role root_role = kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_role);

	if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
@@ -4836,7 +4836,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,

static union kvm_mmu_page_role
kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
-				   union kvm_mmu_role cpu_role)
+				   union kvm_cpu_role cpu_role)
{
	union kvm_mmu_page_role role;

@@ -4862,7 +4862,7 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
}

static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
-				    union kvm_mmu_role cpu_role,
+				    union kvm_cpu_role cpu_role,
				    union kvm_mmu_page_role root_role)
{
	if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
@@ -4890,7 +4890,7 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
				const struct kvm_mmu_role_regs *regs)
{
	struct kvm_mmu *context = &vcpu->arch.root_mmu;
-	union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
+	union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, regs);
	union kvm_mmu_page_role root_role =
		kvm_calc_shadow_mmu_root_page_role(vcpu, cpu_role);

@@ -4899,7 +4899,7 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,

static union kvm_mmu_page_role
kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
-				   union kvm_mmu_role cpu_role)
+				   union kvm_cpu_role cpu_role)
{
	union kvm_mmu_page_role role;

@@ -4918,7 +4918,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
		.cr4 = cr4 & ~X86_CR4_PKE,
		.efer = efer,
	};
-	union kvm_mmu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
+	union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs);
	union kvm_mmu_page_role root_role = kvm_calc_shadow_npt_root_page_role(vcpu, cpu_role);

	shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
@@ -4926,11 +4926,11 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);

-static union kvm_mmu_role
+static union kvm_cpu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
				   bool execonly, u8 level)
{
-	union kvm_mmu_role role = {0};
+	union kvm_cpu_role role = {0};

	/*
	 * KVM does not support SMM transfer monitors, and consequently does not
@@ -4957,7 +4957,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
{
	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
	u8 level = vmx_eptp_page_walk_level(new_eptp);
-	union kvm_mmu_role new_mode =
+	union kvm_cpu_role new_mode =
		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
						   execonly, level);

@@ -4999,7 +4999,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu,
				const struct kvm_mmu_role_regs *regs)
{
-	union kvm_mmu_role new_mode = kvm_calc_cpu_role(vcpu, regs);
+	union kvm_cpu_role new_mode = kvm_calc_cpu_role(vcpu, regs);
	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;

	if (new_mode.as_u64 == g_context->cpu_role.as_u64)
@@ -6276,7 +6276,7 @@ int kvm_mmu_vendor_module_init(void)
	 */
	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
-	BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));
+	BUILD_BUG_ON(sizeof(union kvm_cpu_role) != sizeof(u64));

	kvm_mmu_reset_all_pte_masks();
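
The BUILD_BUG_ON checks above pin down the layout that makes the single
64-bit compares in this file valid. A stand-alone illustration of the same
invariant (hypothetical names, plain C11, not kernel code):

	#include <stdint.h>
	#include <assert.h>

	union cpu_role_sketch {
		uint64_t as_u64;
		struct {
			uint32_t base;	/* stands in for union kvm_mmu_page_role */
			uint32_t ext;	/* stands in for union kvm_mmu_extended_role */
		};
	};

	/* base + ext must pack exactly into the u64 used for comparisons. */
	static_assert(sizeof(union cpu_role_sketch) == sizeof(uint64_t),
		      "role unions must pack into one u64");

	/* One integer compare decides whether the cached MMU context is stale. */
	static inline int role_unchanged(union cpu_role_sketch cached,
					 union cpu_role_sketch next)
	{
		return cached.as_u64 == next.as_u64;
	}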