Commit 41e68b69 authored by Paolo Bonzini

KVM: vmx, svm: clean up mass updates to regs_avail/regs_dirty bits



Document the meaning of the three combinations of regs_avail and
regs_dirty.  Update regs_dirty just after writeback instead of
doing it later after vmexit.  After vmexit, instead, we clear the
regs_avail bits corresponding to lazily-loaded registers.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent c62c7bd4
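
The ordering described in the message can be sketched as a small userspace model (illustrative only, not kernel code; the register names and the lazy-load mask below are simplified stand-ins): regs_dirty is cleared as soon as the dirty values have been written into the VMCS/VMCB, and after the VM exit only the avail bits of lazily loaded registers are dropped.

#include <stdio.h>

enum reg { REG_RIP, REG_RSP, REG_PDPTR };

#define LAZY_LOAD_SET (1u << REG_PDPTR)	/* loaded on demand only */

static unsigned int regs_avail, regs_dirty;

/* Software modified a guest register: cache it and mark it dirty. */
static void cache_write(enum reg r)
{
	regs_avail |= 1u << r;
	regs_dirty |= 1u << r;		/* dirty implies avail */
}

static void vcpu_run(void)
{
	/* ... write every dirty register into the VMCS/VMCB ... */
	regs_dirty = 0;			/* clear right after writeback */
	/* ... hardware VM entry and VM exit ... */
	regs_avail &= ~LAZY_LOAD_SET;	/* lazily loaded regs are stale now */
}

int main(void)
{
	cache_write(REG_RIP);
	printf("before run: avail=%#x dirty=%#x\n", regs_avail, regs_dirty);
	vcpu_run();
	printf("after run:  avail=%#x dirty=%#x\n", regs_avail, regs_dirty);
	return 0;
}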
arch/x86/kvm/kvm_cache_regs.h  +7 −0
@@ -43,6 +43,13 @@ BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

+/*
+ * avail  dirty
+ * 0	  0	  register in VMCS/VMCB
+ * 0	  1	  *INVALID*
+ * 1	  0	  register in vcpu->arch
+ * 1	  1	  register in vcpu->arch, needs to be stored back
+ */
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
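
For context, the helpers that follow this table just test bits in the two masks; a minimal sketch of how a cached register read and write would use them (assumed structure for illustration, not the kernel's exact code):

#include <stdio.h>

enum reg { REG_RSP, REG_RIP, NR_REGS };

struct vcpu_model {
	unsigned long regs[NR_REGS];
	unsigned int regs_avail;
	unsigned int regs_dirty;
};

/* Stand-in for the vendor callback that pulls a register out of the
 * VMCS/VMCB, i.e. the avail=0, dirty=0 row of the table. */
static unsigned long read_from_hw(enum reg r)
{
	return 0x1000ul + r;
}

static unsigned long register_read(struct vcpu_model *vcpu, enum reg r)
{
	if (!(vcpu->regs_avail & (1u << r))) {
		vcpu->regs[r] = read_from_hw(r);
		vcpu->regs_avail |= 1u << r;	/* avail=1, dirty=0 */
	}
	return vcpu->regs[r];
}

static void register_write(struct vcpu_model *vcpu, enum reg r,
			   unsigned long val)
{
	vcpu->regs[r] = val;
	vcpu->regs_avail |= 1u << r;
	vcpu->regs_dirty |= 1u << r;		/* avail=1, dirty=1 */
}

int main(void)
{
	struct vcpu_model vcpu = { .regs_avail = 0 };

	printf("RIP = %#lx\n", register_read(&vcpu, REG_RIP));
	register_write(&vcpu, REG_RSP, 0xffffd000ul);
	printf("avail=%#x dirty=%#x\n", vcpu.regs_avail, vcpu.regs_dirty);
	return 0;
}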
arch/x86/kvm/svm/svm.c  +2 −1
@@ -3946,6 +3946,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
		vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
		vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
	}
+	vcpu->arch.regs_dirty = 0;

	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
		kvm_before_interrupt(vcpu);
@@ -3980,7 +3981,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
		vcpu->arch.apf.host_apf_flags =
			kvm_read_and_reset_apf_flags();

-	kvm_register_clear_available(vcpu, VCPU_EXREG_PDPTR);
+	vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET;

	/*
	 * We need to handle MC intercepts here before the vcpu has a chance to
arch/x86/kvm/svm/svm.h  +10 −0
@@ -326,6 +326,16 @@ static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

+/*
+ * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
+ * fields are synchronized in handle_exit, because accessing the VMCB is cheap.
+ *
+ * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
+ * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
+ * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
+ */
+#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)
+
static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
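
With SVM_REGS_LAZY_LOAD_SET in place, the svm.c hunk above replaces the single clear of VCPU_EXREG_PDPTR with a mask of the whole lazy set. The effect can be sketched like this (simplified userspace model, not kernel code): after each exit the PDPTR avail bit is dropped, so the next PDPTR access notices the cache is cold and re-reads the VMCB, while the registers synchronized in handle_exit stay marked available.

#include <stdio.h>

enum reg { REG_RAX, REG_PDPTR };

#define SVM_LAZY_LOAD_SET (1u << REG_PDPTR)	/* only the PDPTRs are lazy */

static unsigned int regs_avail;

static void vcpu_run(void)
{
	/* ... VM entry/exit; non-lazy registers are synced on exit ... */
	regs_avail &= ~SVM_LAZY_LOAD_SET;
}

static void access_pdptrs(void)
{
	if (!(regs_avail & (1u << REG_PDPTR))) {
		printf("PDPTRs cold, re-reading them from the VMCB\n");
		regs_avail |= 1u << REG_PDPTR;
	} else {
		printf("PDPTRs served from the cache\n");
	}
}

int main(void)
{
	regs_avail = (1u << REG_RAX) | (1u << REG_PDPTR);
	access_pdptrs();	/* cached */
	vcpu_run();
	access_pdptrs();	/* stale after the exit, reloaded on demand */
	return 0;
}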
arch/x86/kvm/vmx/nested.c  +7 −1
@@ -269,7 +269,13 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
	vmx_sync_vmcs_host_state(vmx, prev);
	put_cpu();

-	vmx_register_cache_reset(vcpu);
+	vcpu->arch.regs_avail = ~VMX_REGS_LAZY_LOAD_SET;
+
+	/*
+	 * All lazily updated registers will be reloaded from VMCS12 on both
+	 * vmentry and vmexit.
+	 */
+	vcpu->arch.regs_dirty = 0;
}

/*
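
Note that the nested path above assigns regs_avail outright while the vmexit paths below only mask it: after a vmcs01/vmcs02 switch, everything outside the lazy set is treated as already cached in vcpu->arch, and the lazy set has to come from the newly loaded VMCS; regs_dirty can simply be cleared because, as the new comment says, the lazily updated registers are reloaded from VMCS12 on both nested vmentry and vmexit. A tiny illustration of the two idioms (mask values are made up):

#include <stdio.h>

#define LAZY_LOAD_SET 0x0cu	/* made-up mask of lazily loaded registers */

int main(void)
{
	unsigned int avail = 0x05u;	/* whatever happened to be cached */

	/* vmexit path: keep the current cache, drop only the lazy bits */
	printf("avail &= ~LAZY -> %#x\n", avail & ~LAZY_LOAD_SET);

	/* vmcs switch path: everything except the lazy set counts as cached */
	printf("avail  = ~LAZY -> %#x\n", ~LAZY_LOAD_SET);
	return 0;
}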
arch/x86/kvm/vmx/vmx.c  +2 −1
@@ -6649,6 +6649,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
	if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+	vcpu->arch.regs_dirty = 0;

	cr3 = __get_current_cr3_fast();
	if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
@@ -6743,7 +6744,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
	loadsegment(es, __USER_DS);
#endif

-	vmx_register_cache_reset(vcpu);
+	vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;

	pt_guest_exit(vmx);
