arch/x86/include/asm/uaccess.h  +1 −1

@@ -439,7 +439,7 @@ do {							\
 		       [ptr] "+m" (*_ptr),		\
 		       [old] "+a" (__old)		\
 		     : [new] ltype (__new)		\
-		     : "memory", "cc");			\
+		     : "memory");			\
 	if (unlikely(__err))				\
 		goto label;				\
 	if (unlikely(!success))				\
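The uaccess.h hunk drops the explicit "cc" clobber from the cmpxchg asm. The success flag tested after the asm is presumably produced by a flag-output constraint (the kernel's CC_SET()/CC_OUT() helpers), and a flag output already tells the compiler that EFLAGS is written, so a "cc" clobber is redundant at best; mixing the two is an error with at least some compilers, and on x86 inline asm is generally assumed to clobber the flags anyway. Below is a minimal user-space sketch of the same pattern, assuming x86-64 and GCC/Clang flag-output support; try_cmpxchg32 is a hypothetical name, and the kernel's fault handling (the exception-table plumbing behind __err and goto label) is elided entirely.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical user-space analogue of the kernel macro above. ZF after
 * CMPXCHG indicates success; the "=@ccz" flag output hands that bit to
 * the compiler directly, so no separate "cc" clobber is needed: the
 * flag output already marks EFLAGS as written.
 */
static bool try_cmpxchg32(unsigned int *ptr, unsigned int *oldp,
			  unsigned int new)
{
	unsigned int old = *oldp;
	bool success;

	asm volatile("lock cmpxchgl %[new], %[ptr]"
		     : "=@ccz" (success),
		       [ptr] "+m" (*ptr),
		       [old] "+a" (old)
		     : [new] "r" (new)
		     : "memory");
	if (!success)
		*oldp = old;	/* report the value actually seen */
	return success;
}

int main(void)
{
	unsigned int v = 42, expected = 42;

	if (try_cmpxchg32(&v, &expected, 43))
		printf("swapped, v = %u\n", v);		/* v is now 43 */
	return 0;
}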
arch/x86/kvm/mmu/mmu.c  +1 −1

@@ -5179,7 +5179,7 @@ static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu)
 		roots_to_free |= KVM_MMU_ROOT_CURRENT;
 
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
-		if (is_obsolete_root(kvm, mmu->root.hpa))
+		if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa))
 			roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
 	}
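The mmu.c hunk is a copy-paste bug fix: the loop means to test each previous root for obsolescence, but the old code re-tested the current root on every pass, so stale prev_roots entries were freed only when the current root happened to be obsolete too. A minimal sketch of the corrected loop shape, with stand-in names (prev_root_obsolete, NUM_PREV_ROOTS) for the KVM structures:

#include <stdio.h>

#define NUM_PREV_ROOTS 3

/* Stand-ins for the KVM bookkeeping; the values are made up. */
static const int prev_root_obsolete[NUM_PREV_ROOTS] = { 1, 0, 1 };

int main(void)
{
	unsigned int roots_to_free = 0;

	/*
	 * Fixed shape: test entry i on each pass. The buggy version
	 * tested the *current* root here on every iteration instead.
	 */
	for (int i = 0; i < NUM_PREV_ROOTS; i++) {
		if (prev_root_obsolete[i])
			roots_to_free |= 1u << i;
	}

	printf("roots_to_free = 0x%x\n", roots_to_free);	/* 0x5 */
	return 0;
}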
arch/x86/kvm/mmu/tdp_iter.c  +9 −0

@@ -145,6 +145,15 @@ static bool try_step_up(struct tdp_iter *iter)
 	return true;
 }
 
+/*
+ * Step the iterator back up a level in the paging structure. Should only be
+ * used when the iterator is below the root level.
+ */
+void tdp_iter_step_up(struct tdp_iter *iter)
+{
+	WARN_ON(!try_step_up(iter));
+}
+
 /*
  * Step to the next SPTE in a pre-order traversal of the paging structure.
  * To get to the next SPTE, the iterator either steps down towards the goal
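tdp_iter_step_up() simply promotes a failed try_step_up() to a WARN, for callers that already know the iterator sits below the root. A toy model of the level bookkeeping involved, with hypothetical simplified fields; the real iterator also has to rewind its gfn and sptep state when it changes level:

#include <stdio.h>

/* Toy model; the fields are hypothetical simplifications. */
struct toy_iter {
	int level;	/* current page-table level; 1 == 4K leaf */
	int root_level;	/* root level, e.g. 4 for 4-level paging  */
};

static int try_step_up(struct toy_iter *iter)
{
	if (iter->level == iter->root_level)
		return 0;	/* at the root already; cannot go higher */
	iter->level++;
	return 1;
}

int main(void)
{
	struct toy_iter iter = { .level = 1, .root_level = 4 };

	while (try_step_up(&iter))
		printf("stepped up to level %d\n", iter.level);

	/* One more step would fail here (and WARN in the kernel wrapper). */
	return 0;
}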
arch/x86/kvm/mmu/tdp_iter.h  +1 −0

@@ -114,5 +114,6 @@ void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
 		    int min_level, gfn_t next_last_level_gfn);
 void tdp_iter_next(struct tdp_iter *iter);
 void tdp_iter_restart(struct tdp_iter *iter);
+void tdp_iter_step_up(struct tdp_iter *iter);
 
 #endif /* __KVM_X86_MMU_TDP_ITER_H */
arch/x86/kvm/mmu/tdp_mmu.c  +32 −6

@@ -1742,12 +1742,12 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 	gfn_t start = slot->base_gfn;
 	gfn_t end = start + slot->npages;
 	struct tdp_iter iter;
+	int max_mapping_level;
 	kvm_pfn_t pfn;
 
 	rcu_read_lock();
 
 	tdp_root_for_each_pte(iter, root, start, end) {
-retry:
 		if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
 			continue;
 
@@ -1755,15 +1755,41 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
 
+		/*
+		 * This is a leaf SPTE. Check if the PFN it maps can
+		 * be mapped at a higher level.
+		 */
 		pfn = spte_to_pfn(iter.old_spte);
-		if (kvm_is_reserved_pfn(pfn) ||
-		    iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
-							    pfn, PG_LEVEL_NUM))
+
+		if (kvm_is_reserved_pfn(pfn))
 			continue;
 
+		max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
+				iter.gfn, pfn, PG_LEVEL_NUM);
+
+		WARN_ON(max_mapping_level < iter.level);
+
+		/*
+		 * If this page is already mapped at the highest
+		 * viable level, there's nothing more to do.
+		 */
+		if (max_mapping_level == iter.level)
+			continue;
+
+		/*
+		 * The page can be remapped at a higher level, so step
+		 * up to zap the parent SPTE.
+		 */
+		while (max_mapping_level > iter.level)
+			tdp_iter_step_up(&iter);
+
 		/* Note, a successful atomic zap also does a remote TLB flush. */
-		if (tdp_mmu_zap_spte_atomic(kvm, &iter))
-			goto retry;
+		tdp_mmu_zap_spte_atomic(kvm, &iter);
+
+		/*
+		 * If the atomic zap fails, the iter will recurse back into
+		 * the same subtree to retry.
+		 */
 	}
 
 	rcu_read_unlock();
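Taken together, the tdp_mmu.c changes rework zap_collapsible_spte_range() so that instead of zapping every small leaf whose backing page could be huge-mapped, it asks kvm_mmu_max_mapping_level() for the best level, steps the iterator up to that level, and zaps the single parent SPTE; the next fault then installs one huge mapping for the whole range. A toy walkthrough of that control flow, with made-up levels and no real KVM calls (level 1 = 4K, 2 = 2M, 3 = 1G):

#include <stdio.h>

/*
 * Toy walkthrough of the reworked loop; the levels and helpers are
 * stand-ins, not KVM API.
 */
struct toy_iter { int level; };

int main(void)
{
	struct toy_iter iter = { .level = 1 };	/* found a 4K leaf SPTE  */
	int max_mapping_level = 2;		/* PFN sits in a 2M page */

	if (max_mapping_level == iter.level) {
		puts("already mapped at the best level, nothing to do");
		return 0;
	}

	/* Step up so the zap hits the parent (2M) SPTE, not the leaf. */
	while (max_mapping_level > iter.level)
		iter.level++;	/* tdp_iter_step_up() in the real code */

	printf("zap the non-leaf SPTE at level %d; the next fault installs one 2M mapping\n",
	       iter.level);
	return 0;
}

One parent-SPTE zap replaces up to 512 individual leaf zaps, and, per the diff's own comment, a failed atomic zap leaves the iterator to re-descend into the same subtree and retry, which is what lets the old retry label and goto disappear.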