Patch summary: add a kvm_pv_tlb_flush tracepoint. arch/x86/kvm/trace.h (+19 −0) defines TRACE_EVENT(kvm_pv_tlb_flush) recording vcpu_id and need_flush_tlb; arch/x86/kvm/x86.c (+2 −0) emits the tracepoint from record_steal_time(), just before the xchg() that clears st.steal.preempted and conditionally calls kvm_vcpu_flush_tlb(vcpu, false). The two hunks are reproduced in full below.
arch/x86/kvm/trace.h +19 −0 Original line number Diff line number Diff line Loading @@ -1462,6 +1462,25 @@ TRACE_EVENT(kvm_hv_send_ipi_ex, __entry->vector, __entry->format, __entry->valid_bank_mask) ); TRACE_EVENT(kvm_pv_tlb_flush, TP_PROTO(unsigned int vcpu_id, bool need_flush_tlb), TP_ARGS(vcpu_id, need_flush_tlb), TP_STRUCT__entry( __field( unsigned int, vcpu_id ) __field( bool, need_flush_tlb ) ), TP_fast_assign( __entry->vcpu_id = vcpu_id; __entry->need_flush_tlb = need_flush_tlb; ), TP_printk("vcpu %u need_flush_tlb %s", __entry->vcpu_id, __entry->need_flush_tlb ? "true" : "false") ); #endif /* _TRACE_KVM_H */ #undef TRACE_INCLUDE_PATH Loading
arch/x86/kvm/x86.c (+2 −0), hunk @@ -2459,6 +2459,8 @@ inside static void record_steal_time(struct kvm_vcpu *vcpu) — the trace call is the addition; the surrounding lines are context:

	/*
	 * Doing a TLB flush here, on the guest's behalf, can avoid
	 * expensive IPIs.
	 */
	trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
		vcpu->arch.st.steal.preempted & KVM_VCPU_FLUSH_TLB);
	if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
		kvm_vcpu_flush_tlb(vcpu, false);