tools/testing/selftests/kvm/include/kvm_util.h +2 −0

@@ -110,6 +110,8 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 		 uint32_t pgd_memslot);
 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm,
 	vm_paddr_t paddr_min, uint32_t memslot);
+vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+	vm_paddr_t paddr_min, uint32_t memslot);
 
 struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size,
 				 void *guest_code);
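The new entry point takes the page count ahead of the existing paddr_min/memslot pair, so a caller that needs a physically contiguous guest buffer can grab it in one call. A minimal usage sketch, assuming a VM whose test memory lives in memslot 0 and an illustrative 0x10000 minimum address (neither value comes from this patch):

#include "kvm_util.h"

/* Illustrative values only; not part of the patch. */
#define TEST_MEM_SLOT	0
#define TEST_PADDR_MIN	0x10000

static vm_paddr_t alloc_guest_buffer(struct kvm_vm *vm)
{
	/* Four contiguous guest-physical pages at or above TEST_PADDR_MIN. */
	return vm_phy_pages_alloc(vm, 4, TEST_PADDR_MIN, TEST_MEM_SLOT);
}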
tools/testing/selftests/kvm/lib/kvm_util.c +37 −23

@@ -1378,10 +1378,11 @@ const char *exit_reason_str(unsigned int exit_reason)
 }
 
 /*
- * Physical Page Allocate
+ * Physical Contiguous Page Allocator
  *
  * Input Args:
  *   vm - Virtual Machine
+ *   num - number of pages
  *   paddr_min - Physical address minimum
  *   memslot - Memory region to allocate page from
  *
@@ -1390,16 +1391,18 @@ const char *exit_reason_str(unsigned int exit_reason)
  * Return:
  *   Starting physical address
  *
- * Within the VM specified by vm, locates an available physical page
- * at or above paddr_min. If found, the page is marked as in use
- * and its address is returned. A TEST_ASSERT failure occurs if no
- * page is available at or above paddr_min.
+ * Within the VM specified by vm, locates a range of available physical
+ * pages at or above paddr_min. If found, the pages are marked as in use
+ * and their base address is returned. A TEST_ASSERT failure occurs if
+ * not enough pages are available at or above paddr_min.
  */
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm,
-	vm_paddr_t paddr_min, uint32_t memslot)
+vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+	vm_paddr_t paddr_min, uint32_t memslot)
 {
 	struct userspace_mem_region *region;
-	sparsebit_idx_t pg;
+	sparsebit_idx_t pg, base;
+
+	TEST_ASSERT(num > 0, "Must allocate at least one page");
 
 	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
 		"not divisible by page size.\n"
@@ -1407,11 +1410,17 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 		paddr_min, vm->page_size);
 
 	region = memslot2region(vm, memslot);
-	pg = paddr_min >> vm->page_shift;
+	base = pg = paddr_min >> vm->page_shift;
 
-	/* Locate next available physical page at or above paddr_min. */
-	if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
-		pg = sparsebit_next_set(region->unused_phy_pages, pg);
+	do {
+		for (; pg < base + num; ++pg) {
+			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
+				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
+				break;
+			}
+		}
+	} while (pg && pg != base + num);
+
 	if (pg == 0) {
 		fprintf(stderr, "No guest physical page available, "
 			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
@@ -1420,12 +1429,17 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 		vm_dump(stderr, vm, 2);
 		abort();
 	}
-	}
 
-	/* Specify page as in use and return its address. */
-	sparsebit_clear(region->unused_phy_pages, pg);
+	for (pg = base; pg < base + num; ++pg)
+		sparsebit_clear(region->unused_phy_pages, pg);
 
-	return pg * vm->page_size;
+	return base * vm->page_size;
+}
+
+vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
+			     uint32_t memslot)
+{
+	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
 }
 
 /*
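The search loop deserves a closer look: the inner for walks the candidate window [base, base + num) and, on the first page already in use, restarts the window at the next free page; the do/while exits either when a full window survives the walk (pg == base + num) or when sparsebit_next_set() returns 0, meaning the memslot has no further free page. Below is a standalone model of that control flow, using a plain byte map in place of the sparsebit; every name in it is illustrative, and only the loop structure mirrors the patch:

#include <stddef.h>
#include <stdio.h>

#define NPAGES 16

/* Like sparsebit_next_set(): index of the next free page at or above i,
 * or 0 if there is none. */
static size_t next_free(const unsigned char *free_map, size_t i)
{
	for (; i < NPAGES; ++i)
		if (free_map[i])
			return i;
	return 0;
}

/* Mirror of the patch's do/for scan: find num contiguous free pages at
 * or above start; returns the base page, or 0 on failure. */
static size_t find_contiguous(const unsigned char *free_map,
			      size_t start, size_t num)
{
	size_t base = start, pg = start;

	do {
		/* Walk the candidate window [base, base + num) ... */
		for (; pg < base + num; ++pg) {
			/* The bounds check stands in for the sparsebit's
			 * unbounded index space. */
			if (pg >= NPAGES || !free_map[pg]) {
				/* ... and restart it at the next free page. */
				base = pg = next_free(free_map, pg);
				break;
			}
		}
	} while (pg && pg != base + num);	/* done, or nothing free left */

	return pg ? base : 0;
}

int main(void)
{
	/* Pages 1-2 and 4-7 are free; page 3 is in use. */
	unsigned char free_map[NPAGES] = {
		[1] = 1, [2] = 1, [4] = 1, [5] = 1, [6] = 1, [7] = 1,
	};

	/* A 3-page window starting at 1 trips over page 3 and lands at 4. */
	printf("base = %zu\n", find_contiguous(free_map, 1, 3));
	return 0;
}

Note the quirk the model shares with the patch: index 0 doubles as the "not found" sentinel, so the scan is only meaningful for windows starting above page 0, which the selftests arrange by passing a nonzero paddr_min.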