Loading arch/x86/kvm/mmu.c +18 −0 Original line number Diff line number Diff line Loading @@ -3038,6 +3038,24 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes, return r; } int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]) { struct kvm_shadow_walk_iterator iterator; int nr_sptes = 0; spin_lock(&vcpu->kvm->mmu_lock); for_each_shadow_entry(vcpu, addr, iterator) { sptes[iterator.level-1] = *iterator.sptep; nr_sptes++; if (!is_shadow_present_pte(*iterator.sptep)) break; } spin_unlock(&vcpu->kvm->mmu_lock); return nr_sptes; } EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy); #ifdef AUDIT static const char *audit_msg; Loading arch/x86/kvm/mmu.h +2 −0 Original line number Diff line number Diff line Loading @@ -37,6 +37,8 @@ #define PT32_ROOT_LEVEL 2 #define PT32E_ROOT_LEVEL 3 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]); static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) { if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES)) Loading Loading
/*
 * kvm_mmu_get_spte_hierarchy - snapshot the shadow PTE chain mapping @addr.
 * @vcpu:  vcpu whose shadow page tables are walked
 * @addr:  address to walk the shadow hierarchy for
 * @sptes: out array, indexed by (level - 1); entry N-1 receives the
 *         level-N SPTE that was visited
 *
 * Walks the shadow page tables under mmu_lock, copying each visited SPTE
 * into @sptes. The walk stops after recording the first non-present entry,
 * so a terminating non-present SPTE is included in the snapshot.
 *
 * Returns the number of entries written into @sptes.
 */
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
{
	struct kvm_shadow_walk_iterator walk;
	int count = 0;

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry(vcpu, addr, walk) {
		/* Read the SPTE once; it is stable while mmu_lock is held. */
		u64 spte = *walk.sptep;

		sptes[walk.level - 1] = spte;
		count++;
		if (!is_shadow_present_pte(spte))
			break;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);

	return count;
}
EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
arch/x86/kvm/mmu.h +2 −0 Original line number Diff line number Diff line Loading @@ -37,6 +37,8 @@ #define PT32_ROOT_LEVEL 2 #define PT32E_ROOT_LEVEL 3 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]); static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) { if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES)) Loading