Commit c5f2c766 authored by Sean Christopherson, committed by Paolo Bonzini
Browse files

KVM: VMX: Shadow VMCS pin controls



Prepare to shadow all major control fields on a per-VMCS basis, which
allows KVM to avoid costly VMWRITEs when switching between vmcs01 and
vmcs02.

Shadowing pin controls also allows a future patch to remove the per-VMCS
'hv_timer_armed' flag, as the shadow copy is a superset of said flag.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 70f932ec
Loading
Loading
Loading
Loading
+2 −1
Original line number Original line Diff line number Diff line
@@ -285,6 +285,7 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)


	vm_entry_controls_reset_shadow(vmx);
	vm_entry_controls_reset_shadow(vmx);
	vm_exit_controls_reset_shadow(vmx);
	vm_exit_controls_reset_shadow(vmx);
	pin_controls_reset_shadow(vmx);
	vmx_segment_cache_clear(vmx);
	vmx_segment_cache_clear(vmx);
}
}


@@ -2026,7 +2027,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
	} else {
	} else {
		exec_control &= ~PIN_BASED_POSTED_INTR;
		exec_control &= ~PIN_BASED_POSTED_INTR;
	}
	}
	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
	pin_controls_init(vmx, exec_control);


	/*
	/*
	 * EXEC CONTROLS
	 * EXEC CONTROLS
+4 −6
Original line number Original line Diff line number Diff line
@@ -3844,7 +3844,7 @@ static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);


	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
	if (cpu_has_secondary_exec_ctrls()) {
	if (cpu_has_secondary_exec_ctrls()) {
		if (kvm_vcpu_apicv_active(vcpu))
		if (kvm_vcpu_apicv_active(vcpu))
			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
@@ -4042,7 +4042,7 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */


	/* Control */
	/* Control */
	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
	pin_controls_init(vmx, vmx_pin_based_exec_ctrl(vmx));
	vmx->hv_deadline_tsc = -1;
	vmx->hv_deadline_tsc = -1;


	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
@@ -6366,8 +6366,7 @@ static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
{
{
	vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
	vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
	if (!vmx->loaded_vmcs->hv_timer_armed)
	if (!vmx->loaded_vmcs->hv_timer_armed)
		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
		pin_controls_setbit(vmx, PIN_BASED_VMX_PREEMPTION_TIMER);
			      PIN_BASED_VMX_PREEMPTION_TIMER);
	vmx->loaded_vmcs->hv_timer_armed = true;
	vmx->loaded_vmcs->hv_timer_armed = true;
}
}


@@ -6396,8 +6395,7 @@ static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
	}
	}


	if (vmx->loaded_vmcs->hv_timer_armed)
	if (vmx->loaded_vmcs->hv_timer_armed)
		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
		pin_controls_clearbit(vmx, PIN_BASED_VMX_PREEMPTION_TIMER);
				PIN_BASED_VMX_PREEMPTION_TIMER);
	vmx->loaded_vmcs->hv_timer_armed = false;
	vmx->loaded_vmcs->hv_timer_armed = false;
}
}


+2 −0
Original line number Original line Diff line number Diff line
@@ -88,6 +88,7 @@ struct pt_desc {
struct vmx_controls_shadow {
struct vmx_controls_shadow {
	u32 vm_entry;
	u32 vm_entry;
	u32 vm_exit;
	u32 vm_exit;
	u32 pin;
};
};


/*
/*
@@ -423,6 +424,7 @@ static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
}
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)


static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
{