arch/x86/kvm/x86.c  +5 −5

@@ -6105,7 +6105,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				int user_alloc)
 {
-	int npages = mem->memory_size >> PAGE_SHIFT;
+	int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
 
 	if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
 		int ret;
 
@@ -6120,12 +6120,12 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 				       "failed to munmap memory\n");
 	}
 
+	if (!kvm->arch.n_requested_mmu_pages)
+		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
+
 	spin_lock(&kvm->mmu_lock);
-	if (!kvm->arch.n_requested_mmu_pages) {
-		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
+	if (nr_mmu_pages)
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
-	}
-
 	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
 	spin_unlock(&kvm->mmu_lock);
 }
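Net effect of the two hunks: nr_mmu_pages is now declared (and zero-initialized) at the top of kvm_arch_commit_memory_region() and computed via kvm_mmu_calculate_mmu_pages() before mmu_lock is taken, so only the actual limit update, kvm_mmu_change_mmu_pages(), and kvm_mmu_slot_remove_write_access() run under the lock. As a reading aid, here is a sketch of the function tail as it would look with the patch applied, reconstructed from the '+' and context lines above, not copied verbatim from the tree (the munmap handling earlier in the function is untouched by this diff):

	/* Sketch of the post-patch tail of kvm_arch_commit_memory_region(),
	 * reconstructed from the hunks above. */
	int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;

	/* ... munmap handling for the old slot elided (unchanged by this diff) ... */

	/* Recompute the MMU page limit only when userspace has not pinned it. */
	if (!kvm->arch.n_requested_mmu_pages)
		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);

	spin_lock(&kvm->mmu_lock);
	if (nr_mmu_pages)
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	spin_unlock(&kvm->mmu_lock);

Keeping the kvm_mmu_calculate_mmu_pages() call outside the critical section shortens the mmu_lock hold time; the nr_mmu_pages guard means the limit is only changed when it was actually recalculated, i.e. when userspace has not requested a fixed number (kvm->arch.n_requested_mmu_pages is zero).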