Commit b6a7cc35 authored by Jason Baron, committed by Paolo Bonzini

KVM: X86: prepend vmx/svm prefix to additional kvm_x86_ops functions



A subsequent patch introduces macros in preparation for simplifying the
definition for vmx_x86_ops and svm_x86_ops. Making the naming more uniform
expands the coverage of the macros. Add vmx/svm prefix to the following
functions: update_exception_bitmap(), enable_nmi_window(),
enable_irq_window(), update_cr8_intercept() and enable_smi_window().
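
For context, a minimal standalone sketch (hypothetical, not the actual follow-up patch) of why the uniform <prefix>_<op> naming matters: once every callback is named after the kvm_x86_ops field it fills, a single token-pasting macro can populate the ops table. The VMX_OP macro and the struct below are illustrative only.

/* Hypothetical, self-contained illustration; not kernel code. */
#include <stdio.h>

struct ops {
	void (*enable_irq_window)(void);
	void (*enable_nmi_window)(void);
};

static void vmx_enable_irq_window(void) { puts("vmx: enable irq window"); }
static void vmx_enable_nmi_window(void) { puts("vmx: enable nmi window"); }

/* Uniform naming is what makes ".op = vmx_##op" possible. */
#define VMX_OP(op) .op = vmx_##op

static struct ops vmx_ops = {
	VMX_OP(enable_irq_window),
	VMX_OP(enable_nmi_window),
};

int main(void)
{
	vmx_ops.enable_irq_window();
	vmx_ops.enable_nmi_window();
	return 0;
}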

Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Jason Baron <jbaron@akamai.com>
Message-Id: <ed594696f8e2c2b2bfc747504cee9bbb2a269300.1610680941.git.jbaron@akamai.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 6e4e3b4d
arch/x86/kvm/svm/svm.c  +10 −10
@@ -1829,7 +1829,7 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
	vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
}

-static void update_exception_bitmap(struct kvm_vcpu *vcpu)
+static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

@@ -3446,7 +3446,7 @@ static void svm_set_irq(struct kvm_vcpu *vcpu)
		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

-static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vcpu_svm *svm = to_svm(vcpu);

@@ -3571,7 +3571,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
	return !svm_interrupt_blocked(vcpu);
}

-static void enable_irq_window(struct kvm_vcpu *vcpu)
+static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

@@ -3595,7 +3595,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
	}
}

-static void enable_nmi_window(struct kvm_vcpu *vcpu)
+static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

@@ -4377,7 +4377,7 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
	return ret;
}

-static void enable_smi_window(struct kvm_vcpu *vcpu)
+static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

@@ -4531,7 +4531,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
	.vcpu_blocking = svm_vcpu_blocking,
	.vcpu_unblocking = svm_vcpu_unblocking,

-	.update_exception_bitmap = update_exception_bitmap,
+	.update_exception_bitmap = svm_update_exception_bitmap,
	.get_msr_feature = svm_get_msr_feature,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
@@ -4574,9 +4574,9 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
	.nmi_allowed = svm_nmi_allowed,
	.get_nmi_mask = svm_get_nmi_mask,
	.set_nmi_mask = svm_set_nmi_mask,
-	.enable_nmi_window = enable_nmi_window,
-	.enable_irq_window = enable_irq_window,
-	.update_cr8_intercept = update_cr8_intercept,
+	.enable_nmi_window = svm_enable_nmi_window,
+	.enable_irq_window = svm_enable_irq_window,
+	.update_cr8_intercept = svm_update_cr8_intercept,
	.set_virtual_apic_mode = svm_set_virtual_apic_mode,
	.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
	.check_apicv_inhibit_reasons = svm_check_apicv_inhibit_reasons,
@@ -4619,7 +4619,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
	.smi_allowed = svm_smi_allowed,
	.pre_enter_smm = svm_pre_enter_smm,
	.pre_leave_smm = svm_pre_leave_smm,
-	.enable_smi_window = enable_smi_window,
+	.enable_smi_window = svm_enable_smi_window,

	.mem_enc_op = svm_mem_enc_op,
	.mem_enc_reg_region = svm_register_enc_region,
arch/x86/kvm/vmx/nested.c  +1 −1
@@ -2532,7 +2532,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
	 * bitwise-or of what L1 wants to trap for L2, and what we want to
	 * trap. Note that CR0.TS also needs updating - we do this later.
	 */
-	update_exception_bitmap(vcpu);
+	vmx_update_exception_bitmap(vcpu);
	vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);

arch/x86/kvm/vmx/vmx.c  +15 −15
@@ -814,7 +814,7 @@ static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
	return *p;
}

-void update_exception_bitmap(struct kvm_vcpu *vcpu)
+void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

@@ -2788,7 +2788,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));

-	update_exception_bitmap(vcpu);
+	vmx_update_exception_bitmap(vcpu);

	fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
	fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
@@ -2868,7 +2868,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)

	vmcs_writel(GUEST_RFLAGS, flags);
	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
-	update_exception_bitmap(vcpu);
+	vmx_update_exception_bitmap(vcpu);

	fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
	fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
@@ -4519,23 +4519,23 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
	vmx_set_cr4(vcpu, 0);
	vmx_set_efer(vcpu, 0);

-	update_exception_bitmap(vcpu);
+	vmx_update_exception_bitmap(vcpu);

	vpid_sync_context(vmx->vpid);
	if (init_event)
		vmx_clear_hlt(vcpu);
}

-static void enable_irq_window(struct kvm_vcpu *vcpu)
+static void vmx_enable_irq_window(struct kvm_vcpu *vcpu)
{
	exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING);
}

-static void enable_nmi_window(struct kvm_vcpu *vcpu)
+static void vmx_enable_nmi_window(struct kvm_vcpu *vcpu)
{
	if (!enable_vnmi ||
	    vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
-		enable_irq_window(vcpu);
+		vmx_enable_irq_window(vcpu);
		return;
	}

@@ -6210,7 +6210,7 @@ static noinstr void vmx_l1d_flush(struct kvm_vcpu *vcpu)
		: "eax", "ebx", "ecx", "edx");
}

-static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+static void vmx_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	int tpr_threshold;
@@ -7339,7 +7339,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
	set_cr4_guest_host_mask(vmx);

	/* Refresh #PF interception to account for MAXPHYADDR changes. */
-	update_exception_bitmap(vcpu);
+	vmx_update_exception_bitmap(vcpu);
}

static __init void vmx_set_cpu_caps(void)
@@ -7629,7 +7629,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
	return 0;
}

-static void enable_smi_window(struct kvm_vcpu *vcpu)
+static void vmx_enable_smi_window(struct kvm_vcpu *vcpu)
{
	/* RSM will cause a vmexit anyway.  */
}
@@ -7689,7 +7689,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
	.vcpu_load = vmx_vcpu_load,
	.vcpu_put = vmx_vcpu_put,

-	.update_exception_bitmap = update_exception_bitmap,
+	.update_exception_bitmap = vmx_update_exception_bitmap,
	.get_msr_feature = vmx_get_msr_feature,
	.get_msr = vmx_get_msr,
	.set_msr = vmx_set_msr,
@@ -7732,9 +7732,9 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
	.nmi_allowed = vmx_nmi_allowed,
	.get_nmi_mask = vmx_get_nmi_mask,
	.set_nmi_mask = vmx_set_nmi_mask,
-	.enable_nmi_window = enable_nmi_window,
-	.enable_irq_window = enable_irq_window,
-	.update_cr8_intercept = update_cr8_intercept,
+	.enable_nmi_window = vmx_enable_nmi_window,
+	.enable_irq_window = vmx_enable_irq_window,
+	.update_cr8_intercept = vmx_update_cr8_intercept,
	.set_virtual_apic_mode = vmx_set_virtual_apic_mode,
	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
	.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
@@ -7792,7 +7792,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
	.smi_allowed = vmx_smi_allowed,
	.pre_enter_smm = vmx_pre_enter_smm,
	.pre_leave_smm = vmx_pre_leave_smm,
-	.enable_smi_window = enable_smi_window,
+	.enable_smi_window = vmx_enable_smi_window,

	.can_emulate_instruction = vmx_can_emulate_instruction,
	.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
arch/x86/kvm/vmx/vmx.h  +1 −1
@@ -378,7 +378,7 @@ void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa,
		   int root_level);

-void update_exception_bitmap(struct kvm_vcpu *vcpu);
+void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);