arch/ia64/kvm/kvm-ia64.c +2 −4

@@ -1366,14 +1366,12 @@ static void kvm_release_vm_pages(struct kvm *kvm)
 {
         struct kvm_memslots *slots;
         struct kvm_memory_slot *memslot;
-        int i, j;
+        int j;
         unsigned long base_gfn;
 
         slots = kvm_memslots(kvm);
-        for (i = 0; i < slots->nmemslots; i++) {
-                memslot = &slots->memslots[i];
+        kvm_for_each_memslot(memslot, slots) {
                 base_gfn = memslot->base_gfn;
-
                 for (j = 0; j < memslot->npages; j++) {
                         if (memslot->rmap[j])
                                 put_page((struct page *)memslot->rmap[j]);
arch/x86/kvm/mmu.c +6 −6

@@ -1128,15 +1128,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                           unsigned long data,
                           int (*handler)(struct kvm *kvm, unsigned long *rmapp,
                                          unsigned long data))
 {
-        int i, j;
+        int j;
         int ret;
         int retval = 0;
         struct kvm_memslots *slots;
+        struct kvm_memory_slot *memslot;
 
         slots = kvm_memslots(kvm);
 
-        for (i = 0; i < slots->nmemslots; i++) {
-                struct kvm_memory_slot *memslot = &slots->memslots[i];
+        kvm_for_each_memslot(memslot, slots) {
                 unsigned long start = memslot->userspace_addr;
                 unsigned long end;

@@ -3985,15 +3985,15 @@ int kvm_mmu_module_init(void)
  */
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
 {
-        int i;
         unsigned int nr_mmu_pages;
         unsigned int nr_pages = 0;
         struct kvm_memslots *slots;
+        struct kvm_memory_slot *memslot;
 
         slots = kvm_memslots(kvm);
 
-        for (i = 0; i < slots->nmemslots; i++)
-                nr_pages += slots->memslots[i].npages;
+        kvm_for_each_memslot(memslot, slots)
+                nr_pages += memslot->npages;
 
         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
         nr_mmu_pages = max(nr_mmu_pages,
include/linux/kvm_host.h +4 −0

@@ -308,6 +308,10 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
              (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
              idx++)
 
+#define kvm_for_each_memslot(memslot, slots)                            \
+        for (memslot = &slots->memslots[0];                             \
+             memslot < slots->memslots + (slots)->nmemslots; memslot++)
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
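The new iterator simply walks the flat memslots[] array from slot 0 up to nmemslots, so a caller only declares a struct kvm_memory_slot pointer to use as the cursor. A minimal usage sketch follows (count_memslot_pages is a hypothetical helper, not part of this patch; it assumes the flat memslot-array layout the macro relies on):

        /* Hypothetical helper: total guest pages across all memory slots. */
        static unsigned long count_memslot_pages(struct kvm *kvm)
        {
                struct kvm_memslots *slots = kvm_memslots(kvm);
                struct kvm_memory_slot *memslot;
                unsigned long nr_pages = 0;

                /* Visit every slot; the macro advances the memslot pointer. */
                kvm_for_each_memslot(memslot, slots)
                        nr_pages += memslot->npages;

                return nr_pages;
        }

This is the same conversion pattern applied in every file touched by the patch: drop the integer index and the open-coded &slots->memslots[i] lookup, and let the macro advance the slot pointer.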
virt/kvm/iommu.c +9 −8

@@ -134,14 +134,15 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 
 static int kvm_iommu_map_memslots(struct kvm *kvm)
 {
-        int i, idx, r = 0;
+        int idx, r = 0;
         struct kvm_memslots *slots;
+        struct kvm_memory_slot *memslot;
 
         idx = srcu_read_lock(&kvm->srcu);
         slots = kvm_memslots(kvm);
 
-        for (i = 0; i < slots->nmemslots; i++) {
-                r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
+        kvm_for_each_memslot(memslot, slots) {
+                r = kvm_iommu_map_pages(kvm, memslot);
                 if (r)
                         break;
         }

@@ -311,16 +312,16 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 {
-        int i, idx;
+        int idx;
         struct kvm_memslots *slots;
+        struct kvm_memory_slot *memslot;
 
         idx = srcu_read_lock(&kvm->srcu);
         slots = kvm_memslots(kvm);
 
-        for (i = 0; i < slots->nmemslots; i++) {
-                kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
-                                    slots->memslots[i].npages);
-        }
+        kvm_for_each_memslot(memslot, slots)
+                kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages);
+
         srcu_read_unlock(&kvm->srcu, idx);
         return 0;
virt/kvm/kvm_main.c +6 −8

@@ -547,11 +547,11 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
 
 void kvm_free_physmem(struct kvm *kvm)
 {
-        int i;
         struct kvm_memslots *slots = kvm->memslots;
+        struct kvm_memory_slot *memslot;
 
-        for (i = 0; i < slots->nmemslots; ++i)
-                kvm_free_physmem_slot(&slots->memslots[i], NULL);
+        kvm_for_each_memslot(memslot, slots)
+                kvm_free_physmem_slot(memslot, NULL);
 
         kfree(kvm->memslots);
 }

@@ -975,15 +975,13 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
                                                 gfn_t gfn)
 {
-        int i;
+        struct kvm_memory_slot *memslot;
 
-        for (i = 0; i < slots->nmemslots; ++i) {
-                struct kvm_memory_slot *memslot = &slots->memslots[i];
-
+        kvm_for_each_memslot(memslot, slots)
                 if (gfn >= memslot->base_gfn
                     && gfn < memslot->base_gfn + memslot->npages)
                         return memslot;
-        }
 
         return NULL;
 }