Patch: pass the vendor ISA (VMX vs. SVM) to the kvm_exit tracepoint so the
exit_reason can be decoded correctly per vendor.

--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2980,7 +2980,7 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 	struct kvm_run *kvm_run = vcpu->run;
 	u32 exit_code = svm->vmcb->control.exit_code;
 
-	trace_kvm_exit(exit_code, vcpu);
+	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
 
 	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
 		vcpu->arch.cr0 = svm->vmcb->save.cr0;

--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -178,21 +178,26 @@ TRACE_EVENT(kvm_apic,
 #define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
 #define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)
 
+#define KVM_ISA_VMX   1
+#define KVM_ISA_SVM   2
+
 /*
  * Tracepoint for kvm guest exit:
  */
 TRACE_EVENT(kvm_exit,
-	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu),
-	TP_ARGS(exit_reason, vcpu),
+	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
+	TP_ARGS(exit_reason, vcpu, isa),
 
 	TP_STRUCT__entry(
 		__field(	unsigned int,	exit_reason	)
 		__field(	unsigned long,	guest_rip	)
+		__field(	u32,		isa		)
 	),
 
 	TP_fast_assign(
 		__entry->exit_reason	= exit_reason;
 		__entry->guest_rip	= kvm_rip_read(vcpu);
+		__entry->isa		= isa;
 	),
 
 	TP_printk("reason %s rip 0x%lx",

--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3700,7 +3700,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	u32 exit_reason = vmx->exit_reason;
 	u32 vectoring_info = vmx->idt_vectoring_info;
 
-	trace_kvm_exit(exit_reason, vcpu);
+	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
 
 	/* If guest state is invalid, start emulating */
 	if (vmx->emulation_required && emulate_invalid_guest_state)