arch/x86/kvm/mmu.c  +1 −0

@@ -4264,6 +4264,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
 {
 	spin_lock(&kvm->mmu_lock);
+	trace_kvm_mmu_invalidate_zap_all_pages(kvm);
 	kvm->arch.mmu_valid_gen++;
 
 	kvm_zap_obsolete_pages(kvm);
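For readability, this is roughly how kvm_mmu_invalidate_zap_all_pages() reads once the hunk above is applied; the tail of the function is abbreviated, and the comment on ordering is editorial rather than part of the patch:

void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
{
	spin_lock(&kvm->mmu_lock);
	/*
	 * The tracepoint fires before mmu_valid_gen is bumped, so the
	 * event records the generation that is about to be retired and
	 * the shadow-page count before anything is zapped.
	 */
	trace_kvm_mmu_invalidate_zap_all_pages(kvm);
	kvm->arch.mmu_valid_gen++;

	kvm_zap_obsolete_pages(kvm);
	/* remainder of the function (unlock, etc.) omitted */
}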
arch/x86/kvm/mmutrace.h  +20 −0

@@ -276,6 +276,26 @@ TRACE_EVENT(
 	__spte_satisfied(old_spte), __spte_satisfied(new_spte)
 	)
 );
+
+TRACE_EVENT(
+	kvm_mmu_invalidate_zap_all_pages,
+	TP_PROTO(struct kvm *kvm),
+	TP_ARGS(kvm),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, mmu_valid_gen)
+		__field(unsigned int, mmu_used_pages)
+	),
+
+	TP_fast_assign(
+		__entry->mmu_valid_gen = kvm->arch.mmu_valid_gen;
+		__entry->mmu_used_pages = kvm->arch.n_used_mmu_pages;
+	),
+
+	TP_printk("kvm-mmu-valid-gen %lx used_pages %x",
+		  __entry->mmu_valid_gen, __entry->mmu_used_pages
+	)
+);
 #endif /* _TRACE_KVMMMU_H */
 
 #undef TRACE_INCLUDE_PATH
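mmutrace.h declares its events under TRACE_SYSTEM kvmmmu, so the new tracepoint can be enabled through the tracing filesystem like any other event. A rendered trace line would look roughly like the sketch below; the task name, CPU, timestamp, and field values are illustrative only, and note that TP_printk's %lx/%x conversions emit both fields in hex:

	# hypothetical output, assuming the event was enabled via
	# /sys/kernel/debug/tracing/events/kvmmmu/kvm_mmu_invalidate_zap_all_pages/enable
	qemu-system-x86-2861 [002] 1234.567890: kvm_mmu_invalidate_zap_all_pages: kvm-mmu-valid-gen 5 used_pages 2ac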