Commit f8faf02f authored by Marc Zyngier

Merge branch kvm-arm64/selftest/memslot-fixes into kvmarm-master/next



* kvm-arm64/selftest/memslot-fixes:
  : .
  : KVM memslot selftest fixes for non-4kB page sizes, courtesy
  : of Gavin Shan. From the cover letter:
  :
  : "kvm/selftests/memslots_perf_test doesn't work with 64KB-page-size-host
  : and 4KB-page-size-guest on aarch64. In the implementation, the host and
  : guest page size have been hardcoded to 4KB. It's ovbiously not working
  : on aarch64 which supports 4KB, 16KB, 64KB individually on host and guest.
  :
  : This series tries to fix it. After the series is applied, the test runs
  : successfully with 64KB-page-size-host and 4KB-page-size-guest."
  : .
  KVM: selftests: memslot_perf_test: Report optimal memory slots
  KVM: selftests: memslot_perf_test: Consolidate memory
  KVM: selftests: memslot_perf_test: Support variable guest page size
  KVM: selftests: memslot_perf_test: Probe memory slots for once
  KVM: selftests: memslot_perf_test: Consolidate loop conditions in prepare_vm()
  KVM: selftests: memslot_perf_test: Use data->nslots in prepare_vm()

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents f0c4d9fc a69170c6
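
The recurring change in the diff below is that test sizes stay specified in
bytes while page counts are derived from the page size discovered at run
time, instead of assuming 4096 everywhere. A minimal sketch of that pattern
(illustrative only; size_to_pages() is a hypothetical helper, not part of
the patch):

	/* Hypothetical helper showing the series' core idea: keep sizes in
	 * bytes, derive page counts from the runtime page size. */
	#include <stdint.h>

	static inline uint64_t size_to_pages(uint64_t size_bytes, uint32_t page_size)
	{
		/* 512 MiB is 131072 pages with 4 KiB pages, but 8192 with 64 KiB */
		return size_bytes / page_size;
	}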
+208 −109
@@ -20,20 +20,20 @@
 #include <unistd.h>
 
 #include <linux/compiler.h>
+#include <linux/sizes.h>
 
 #include <test_util.h>
 #include <kvm_util.h>
 #include <processor.h>
 
-#define MEM_SIZE		((512U << 20) + 4096)
-#define MEM_SIZE_PAGES		(MEM_SIZE / 4096)
-#define MEM_GPA		0x10000000UL
+#define MEM_EXTRA_SIZE		SZ_64K
+
+#define MEM_SIZE		(SZ_512M + MEM_EXTRA_SIZE)
+#define MEM_GPA			SZ_256M
 #define MEM_AUX_GPA		MEM_GPA
 #define MEM_SYNC_GPA		MEM_AUX_GPA
-#define MEM_TEST_GPA		(MEM_AUX_GPA + 4096)
-#define MEM_TEST_SIZE		(MEM_SIZE - 4096)
-static_assert(MEM_SIZE % 4096 == 0, "invalid mem size");
-static_assert(MEM_TEST_SIZE % 4096 == 0, "invalid mem test size");
+#define MEM_TEST_GPA		(MEM_AUX_GPA + MEM_EXTRA_SIZE)
+#define MEM_TEST_SIZE		(MEM_SIZE - MEM_EXTRA_SIZE)
 
 /*
  * 32 MiB is max size that gets well over 100 iterations on 509 slots.
@@ -41,44 +41,38 @@ static_assert(MEM_TEST_SIZE % 4096 == 0, "invalid mem test size");
  * 8194 slots in use can then be tested (although with slightly
  * limited resolution).
  */
-#define MEM_SIZE_MAP		((32U << 20) + 4096)
-#define MEM_SIZE_MAP_PAGES	(MEM_SIZE_MAP / 4096)
-#define MEM_TEST_MAP_SIZE	(MEM_SIZE_MAP - 4096)
-#define MEM_TEST_MAP_SIZE_PAGES (MEM_TEST_MAP_SIZE / 4096)
-static_assert(MEM_SIZE_MAP % 4096 == 0, "invalid map test region size");
-static_assert(MEM_TEST_MAP_SIZE % 4096 == 0, "invalid map test region size");
-static_assert(MEM_TEST_MAP_SIZE_PAGES % 2 == 0, "invalid map test region size");
-static_assert(MEM_TEST_MAP_SIZE_PAGES > 2, "invalid map test region size");
+#define MEM_SIZE_MAP		(SZ_32M + MEM_EXTRA_SIZE)
+#define MEM_TEST_MAP_SIZE	(MEM_SIZE_MAP - MEM_EXTRA_SIZE)
 
 /*
  * 128 MiB is min size that fills 32k slots with at least one page in each
  * while at the same time gets 100+ iterations in such test
+ *
+ * 2 MiB chunk size like a typical huge page
  */
-#define MEM_TEST_UNMAP_SIZE		(128U << 20)
-#define MEM_TEST_UNMAP_SIZE_PAGES	(MEM_TEST_UNMAP_SIZE / 4096)
-/* 2 MiB chunk size like a typical huge page */
-#define MEM_TEST_UNMAP_CHUNK_PAGES	(2U << (20 - 12))
-static_assert(MEM_TEST_UNMAP_SIZE <= MEM_TEST_SIZE,
-	      "invalid unmap test region size");
-static_assert(MEM_TEST_UNMAP_SIZE % 4096 == 0,
-	      "invalid unmap test region size");
-static_assert(MEM_TEST_UNMAP_SIZE_PAGES %
-	      (2 * MEM_TEST_UNMAP_CHUNK_PAGES) == 0,
-	      "invalid unmap test region size");
+#define MEM_TEST_UNMAP_SIZE		SZ_128M
+#define MEM_TEST_UNMAP_CHUNK_SIZE	SZ_2M
 
 /*
  * For the move active test the middle of the test area is placed on
  * a memslot boundary: half lies in the memslot being moved, half in
  * other memslot(s).
  *
- * When running this test with 32k memslots (32764, really) each memslot
- * contains 4 pages.
- * The last one additionally contains the remaining 21 pages of memory,
- * for the total size of 25 pages.
- * Hence, the maximum size here is 50 pages.
+ * We have different number of memory slots, excluding the reserved
+ * memory slot 0, on various architectures and configurations. The
+ * memory size in this test is calculated by picking the maximal
+ * last memory slot's memory size, with alignment to the largest
+ * supported page size (64KB). In this way, the selected memory
+ * size for this test is compatible with test_memslot_move_prepare().
+ *
+ * architecture   slots    memory-per-slot    memory-on-last-slot
+ * --------------------------------------------------------------
+ * x86-4KB        32763    16KB               160KB
+ * arm64-4KB      32766    16KB               112KB
+ * arm64-16KB     32766    16KB               112KB
+ * arm64-64KB     8192     64KB               128KB
  */
-#define MEM_TEST_MOVE_SIZE_PAGES	(50)
-#define MEM_TEST_MOVE_SIZE		(MEM_TEST_MOVE_SIZE_PAGES * 4096)
+#define MEM_TEST_MOVE_SIZE		(3 * SZ_64K)
 #define MEM_TEST_MOVE_GPA_DEST		(MEM_GPA + MEM_SIZE)
 static_assert(MEM_TEST_MOVE_SIZE <= MEM_TEST_SIZE,
 	      "invalid move test region size");
@@ -100,6 +94,7 @@ struct vm_data {
 };
 
 struct sync_area {
+	uint32_t    guest_page_size;
 	atomic_bool start_flag;
 	atomic_bool exit_flag;
 	atomic_bool sync_flag;
@@ -192,14 +187,15 @@ static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
 	uint64_t gpage, pgoffs;
 	uint32_t slot, slotoffs;
 	void *base;
+	uint32_t guest_page_size = data->vm->page_size;
 
 	TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate");
-	TEST_ASSERT(gpa < MEM_GPA + data->npages * 4096,
+	TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size,
 		    "Too high gpa to translate");
 	gpa -= MEM_GPA;
 
-	gpage = gpa / 4096;
-	pgoffs = gpa % 4096;
+	gpage = gpa / guest_page_size;
+	pgoffs = gpa % guest_page_size;
 	slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1);
 	slotoffs = gpage - (slot * data->pages_per_slot);
 
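With guest_page_size read from data->vm->page_size, the gpa-to-hva split now
matches the real guest page size. For example, assuming 64 KiB guest pages
and an offset of 0x12345 past MEM_GPA (hypothetical values):

	uint64_t gpa_off = 0x12345;                   /* gpa - MEM_GPA */
	uint32_t guest_page_size = 0x10000;           /* 64 KiB */
	uint64_t gpage  = gpa_off / guest_page_size;  /* 1 */
	uint64_t pgoffs = gpa_off % guest_page_size;  /* 0x2345 */
	/* the old hardcoded 4096 would have produced gpage = 18 here */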
@@ -217,14 +213,16 @@ static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
 	}
 
 	base = data->hva_slots[slot];
-	return (uint8_t *)base + slotoffs * 4096 + pgoffs;
+	return (uint8_t *)base + slotoffs * guest_page_size + pgoffs;
 }
 
 static uint64_t vm_slot2gpa(struct vm_data *data, uint32_t slot)
 {
+	uint32_t guest_page_size = data->vm->page_size;
+
 	TEST_ASSERT(slot < data->nslots, "Too high slot number");
 
-	return MEM_GPA + slot * data->pages_per_slot * 4096;
+	return MEM_GPA + slot * data->pages_per_slot * guest_page_size;
 }
 
 static struct vm_data *alloc_vm(void)
@@ -241,82 +239,110 @@ static struct vm_data *alloc_vm(void)
 	return data;
 }
 
+static bool check_slot_pages(uint32_t host_page_size, uint32_t guest_page_size,
+			     uint64_t pages_per_slot, uint64_t rempages)
+{
+	if (!pages_per_slot)
+		return false;
+
+	if ((pages_per_slot * guest_page_size) % host_page_size)
+		return false;
+
+	if ((rempages * guest_page_size) % host_page_size)
+		return false;
+
+	return true;
+}
+
+
+static uint64_t get_max_slots(struct vm_data *data, uint32_t host_page_size)
+{
+	uint32_t guest_page_size = data->vm->page_size;
+	uint64_t mempages, pages_per_slot, rempages;
+	uint64_t slots;
+
+	mempages = data->npages;
+	slots = data->nslots;
+	while (--slots > 1) {
+		pages_per_slot = mempages / slots;
+		rempages = mempages % pages_per_slot;
+		if (check_slot_pages(host_page_size, guest_page_size,
+				     pages_per_slot, rempages))
+			return slots + 1;	/* slot 0 is reserved */
+	}
+
+	return 0;
+}
+
 static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
-		       void *guest_code, uint64_t mempages,
+		       void *guest_code, uint64_t mem_size,
 		       struct timespec *slot_runtime)
 {
-	uint32_t max_mem_slots;
-	uint64_t rempages;
+	uint64_t mempages, rempages;
 	uint64_t guest_addr;
-	uint32_t slot;
+	uint32_t slot, host_page_size, guest_page_size;
 	struct timespec tstart;
 	struct sync_area *sync;
 
-	max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
-	TEST_ASSERT(max_mem_slots > 1,
-		    "KVM_CAP_NR_MEMSLOTS should be greater than 1");
-	TEST_ASSERT(nslots > 1 || nslots == -1,
-		    "Slot count cap should be greater than 1");
-	if (nslots != -1)
-		max_mem_slots = min(max_mem_slots, (uint32_t)nslots);
-	pr_info_v("Allowed number of memory slots: %"PRIu32"\n", max_mem_slots);
+	host_page_size = getpagesize();
+	guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
+	mempages = mem_size / guest_page_size;
 
-	TEST_ASSERT(mempages > 1,
-		    "Can't test without any memory");
+	data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code);
+	ucall_init(data->vm, NULL);
+	TEST_ASSERT(data->vm->page_size == guest_page_size, "Invalid VM page size");
 
 	data->npages = mempages;
-	data->nslots = max_mem_slots - 1;
-	data->pages_per_slot = mempages / data->nslots;
-	if (!data->pages_per_slot) {
-		*maxslots = mempages + 1;
+	TEST_ASSERT(data->npages > 1, "Can't test without any memory");
+	data->nslots = nslots;
+	data->pages_per_slot = data->npages / data->nslots;
+	rempages = data->npages % data->nslots;
+	if (!check_slot_pages(host_page_size, guest_page_size,
+			      data->pages_per_slot, rempages)) {
+		*maxslots = get_max_slots(data, host_page_size);
 		return false;
 	}
 
-	rempages = mempages % data->nslots;
 	data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots);
 	TEST_ASSERT(data->hva_slots, "malloc() fail");
 
-	data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code);
-	ucall_init(data->vm, NULL);
-
 	pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
-		max_mem_slots - 1, data->pages_per_slot, rempages);
+		data->nslots, data->pages_per_slot, rempages);
 
 	clock_gettime(CLOCK_MONOTONIC, &tstart);
-	for (slot = 1, guest_addr = MEM_GPA; slot < max_mem_slots; slot++) {
+	for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) {
 		uint64_t npages;
 
 		npages = data->pages_per_slot;
-		if (slot == max_mem_slots - 1)
+		if (slot == data->nslots)
 			npages += rempages;
 
 		vm_userspace_mem_region_add(data->vm, VM_MEM_SRC_ANONYMOUS,
 					    guest_addr, slot, npages,
 					    0);
-		guest_addr += npages * 4096;
+		guest_addr += npages * guest_page_size;
 	}
 	*slot_runtime = timespec_elapsed(tstart);
 
-	for (slot = 0, guest_addr = MEM_GPA; slot < max_mem_slots - 1; slot++) {
+	for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) {
 		uint64_t npages;
 		uint64_t gpa;
 
 		npages = data->pages_per_slot;
-		if (slot == max_mem_slots - 2)
+		if (slot == data->nslots)
 			npages += rempages;
 
-		gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr,
-					 slot + 1);
+		gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr, slot);
 		TEST_ASSERT(gpa == guest_addr,
 			    "vm_phy_pages_alloc() failed\n");
 
-		data->hva_slots[slot] = addr_gpa2hva(data->vm, guest_addr);
-		memset(data->hva_slots[slot], 0, npages * 4096);
+		data->hva_slots[slot - 1] = addr_gpa2hva(data->vm, guest_addr);
+		memset(data->hva_slots[slot - 1], 0, npages * guest_page_size);
 
-		guest_addr += npages * 4096;
+		guest_addr += npages * guest_page_size;
 	}
 
-	virt_map(data->vm, MEM_GPA, MEM_GPA, mempages);
+	virt_map(data->vm, MEM_GPA, MEM_GPA, data->npages);
 
 	sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
 	atomic_init(&sync->start_flag, false);
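
check_slot_pages() captures the requirement that each slot's memory, and the
remainder folded into the last slot, covers whole host pages; get_max_slots()
then reports the largest slot count that still satisfies it when prepare_vm()
has to bail out. Plugging in hypothetical numbers for a 64 KiB host running a
4 KiB guest:

	uint32_t host_page_size  = 0x10000;  /* 64 KiB host pages (assumed) */
	uint32_t guest_page_size = 0x1000;   /*  4 KiB guest pages (assumed) */
	/* 16 guest pages per slot = 64 KiB per slot: host aligned, accepted */
	/* 10 guest pages per slot = 40 KiB per slot: rejected, and
	 * get_max_slots() walks the slot count down until a count fits */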
@@ -415,6 +441,7 @@ static bool guest_perform_sync(void)
 static void guest_code_test_memslot_move(void)
 {
 	struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+	uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
 	uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr);
 
 	GUEST_SYNC(0);
@@ -425,7 +452,7 @@ static void guest_code_test_memslot_move(void)
 		uintptr_t ptr;
 
 		for (ptr = base; ptr < base + MEM_TEST_MOVE_SIZE;
-		     ptr += 4096)
+		     ptr += page_size)
 			*(uint64_t *)ptr = MEM_TEST_VAL_1;
 
 		/*
@@ -443,6 +470,7 @@ static void guest_code_test_memslot_move(void)
 static void guest_code_test_memslot_map(void)
 {
 	struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+	uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
 
 	GUEST_SYNC(0);
 
@@ -452,14 +480,16 @@ static void guest_code_test_memslot_map(void)
 		uintptr_t ptr;
 
 		for (ptr = MEM_TEST_GPA;
-		     ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2; ptr += 4096)
+		     ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2;
+		     ptr += page_size)
 			*(uint64_t *)ptr = MEM_TEST_VAL_1;
 
 		if (!guest_perform_sync())
 			break;
 
 		for (ptr = MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2;
-		     ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE; ptr += 4096)
+		     ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE;
+		     ptr += page_size)
 			*(uint64_t *)ptr = MEM_TEST_VAL_2;
 
 		if (!guest_perform_sync())
@@ -506,6 +536,9 @@ static void guest_code_test_memslot_unmap(void)
 
 static void guest_code_test_memslot_rw(void)
 {
+	struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA;
+	uint32_t page_size = (typeof(page_size))READ_ONCE(sync->guest_page_size);
+
 	GUEST_SYNC(0);
 
 	guest_spin_until_start();
@@ -514,14 +547,14 @@ static void guest_code_test_memslot_rw(void)
 		uintptr_t ptr;
 
 		for (ptr = MEM_TEST_GPA;
-		     ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += 4096)
+		     ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size)
 			*(uint64_t *)ptr = MEM_TEST_VAL_1;
 
 		if (!guest_perform_sync())
 			break;
 
-		for (ptr = MEM_TEST_GPA + 4096 / 2;
-		     ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += 4096) {
+		for (ptr = MEM_TEST_GPA + page_size / 2;
+		     ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size) {
 			uint64_t val = *(uint64_t *)ptr;
 
 			GUEST_ASSERT_1(val == MEM_TEST_VAL_2, val);
@@ -539,6 +572,7 @@ static bool test_memslot_move_prepare(struct vm_data *data,
 				      struct sync_area *sync,
 				      uint64_t *maxslots, bool isactive)
 {
+	uint32_t guest_page_size = data->vm->page_size;
 	uint64_t movesrcgpa, movetestgpa;
 
 	movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
@@ -547,7 +581,7 @@ static bool test_memslot_move_prepare(struct vm_data *data,
 		uint64_t lastpages;
 
 		vm_gpa2hva(data, movesrcgpa, &lastpages);
-		if (lastpages < MEM_TEST_MOVE_SIZE_PAGES / 2) {
+		if (lastpages * guest_page_size < MEM_TEST_MOVE_SIZE / 2) {
 			*maxslots = 0;
 			return false;
 		}
@@ -593,8 +627,9 @@ static void test_memslot_do_unmap(struct vm_data *data,
 				  uint64_t offsp, uint64_t count)
 {
 	uint64_t gpa, ctr;
+	uint32_t guest_page_size = data->vm->page_size;
 
-	for (gpa = MEM_TEST_GPA + offsp * 4096, ctr = 0; ctr < count; ) {
+	for (gpa = MEM_TEST_GPA + offsp * guest_page_size, ctr = 0; ctr < count; ) {
 		uint64_t npages;
 		void *hva;
 		int ret;
@@ -602,12 +637,12 @@ static void test_memslot_do_unmap(struct vm_data *data,
 		hva = vm_gpa2hva(data, gpa, &npages);
 		TEST_ASSERT(npages, "Empty memory slot at gptr 0x%"PRIx64, gpa);
 		npages = min(npages, count - ctr);
-		ret = madvise(hva, npages * 4096, MADV_DONTNEED);
+		ret = madvise(hva, npages * guest_page_size, MADV_DONTNEED);
 		TEST_ASSERT(!ret,
 			    "madvise(%p, MADV_DONTNEED) on VM memory should not fail for gptr 0x%"PRIx64,
 			    hva, gpa);
 		ctr += npages;
-		gpa += npages * 4096;
+		gpa += npages * guest_page_size;
 	}
 	TEST_ASSERT(ctr == count,
 		    "madvise(MADV_DONTNEED) should exactly cover all of the requested area");
@@ -618,11 +653,12 @@ static void test_memslot_map_unmap_check(struct vm_data *data,
 {
 	uint64_t gpa;
 	uint64_t *val;
+	uint32_t guest_page_size = data->vm->page_size;
 
 	if (!map_unmap_verify)
 		return;
 
-	gpa = MEM_TEST_GPA + offsp * 4096;
+	gpa = MEM_TEST_GPA + offsp * guest_page_size;
 	val = (typeof(val))vm_gpa2hva(data, gpa, NULL);
 	TEST_ASSERT(*val == valexp,
 		    "Guest written values should read back correctly before unmap (%"PRIu64" vs %"PRIu64" @ %"PRIx64")",
@@ -632,12 +668,14 @@ static void test_memslot_map_unmap_check(struct vm_data *data,
 
 static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync)
 {
+	uint32_t guest_page_size = data->vm->page_size;
+	uint64_t guest_pages = MEM_TEST_MAP_SIZE / guest_page_size;
+
 	/*
 	 * Unmap the second half of the test area while guest writes to (maps)
 	 * the first half.
 	 */
-	test_memslot_do_unmap(data, MEM_TEST_MAP_SIZE_PAGES / 2,
-			      MEM_TEST_MAP_SIZE_PAGES / 2);
+	test_memslot_do_unmap(data, guest_pages / 2, guest_pages / 2);
 
 	/*
 	 * Wait for the guest to finish writing the first half of the test
@@ -648,10 +686,8 @@ static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync)
 	 */
 	host_perform_sync(sync);
 	test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
-	test_memslot_map_unmap_check(data,
-				     MEM_TEST_MAP_SIZE_PAGES / 2 - 1,
-				     MEM_TEST_VAL_1);
-	test_memslot_do_unmap(data, 0, MEM_TEST_MAP_SIZE_PAGES / 2);
+	test_memslot_map_unmap_check(data, guest_pages / 2 - 1, MEM_TEST_VAL_1);
+	test_memslot_do_unmap(data, 0, guest_pages / 2);
 
 
 	/*
@@ -664,16 +700,16 @@ static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync)
 	 * the test area.
 	 */
 	host_perform_sync(sync);
-	test_memslot_map_unmap_check(data, MEM_TEST_MAP_SIZE_PAGES / 2,
-				     MEM_TEST_VAL_2);
-	test_memslot_map_unmap_check(data, MEM_TEST_MAP_SIZE_PAGES - 1,
-				     MEM_TEST_VAL_2);
+	test_memslot_map_unmap_check(data, guest_pages / 2, MEM_TEST_VAL_2);
+	test_memslot_map_unmap_check(data, guest_pages - 1, MEM_TEST_VAL_2);
 }
 
 static void test_memslot_unmap_loop_common(struct vm_data *data,
 					   struct sync_area *sync,
 					   uint64_t chunk)
 {
+	uint32_t guest_page_size = data->vm->page_size;
+	uint64_t guest_pages = MEM_TEST_UNMAP_SIZE / guest_page_size;
 	uint64_t ctr;
 
 	/*
@@ -685,42 +721,49 @@ static void test_memslot_unmap_loop_common(struct vm_data *data,
 	 */
 	host_perform_sync(sync);
 	test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
-	for (ctr = 0; ctr < MEM_TEST_UNMAP_SIZE_PAGES / 2; ctr += chunk)
+	for (ctr = 0; ctr < guest_pages / 2; ctr += chunk)
 		test_memslot_do_unmap(data, ctr, chunk);
 
 	/* Likewise, but for the opposite host / guest areas */
 	host_perform_sync(sync);
-	test_memslot_map_unmap_check(data, MEM_TEST_UNMAP_SIZE_PAGES / 2,
-				     MEM_TEST_VAL_2);
-	for (ctr = MEM_TEST_UNMAP_SIZE_PAGES / 2;
-	     ctr < MEM_TEST_UNMAP_SIZE_PAGES; ctr += chunk)
+	test_memslot_map_unmap_check(data, guest_pages / 2, MEM_TEST_VAL_2);
+	for (ctr = guest_pages / 2; ctr < guest_pages; ctr += chunk)
 		test_memslot_do_unmap(data, ctr, chunk);
 }
 
 static void test_memslot_unmap_loop(struct vm_data *data,
 				    struct sync_area *sync)
 {
-	test_memslot_unmap_loop_common(data, sync, 1);
+	uint32_t host_page_size = getpagesize();
+	uint32_t guest_page_size = data->vm->page_size;
+	uint64_t guest_chunk_pages = guest_page_size >= host_page_size ?
+					1 : host_page_size / guest_page_size;
+
+	test_memslot_unmap_loop_common(data, sync, guest_chunk_pages);
 }
 
 static void test_memslot_unmap_loop_chunked(struct vm_data *data,
 					    struct sync_area *sync)
 {
-	test_memslot_unmap_loop_common(data, sync, MEM_TEST_UNMAP_CHUNK_PAGES);
+	uint32_t guest_page_size = data->vm->page_size;
+	uint64_t guest_chunk_pages = MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size;
+
+	test_memslot_unmap_loop_common(data, sync, guest_chunk_pages);
 }
 
 static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync)
 {
 	uint64_t gptr;
+	uint32_t guest_page_size = data->vm->page_size;
 
-	for (gptr = MEM_TEST_GPA + 4096 / 2;
-	     gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += 4096)
+	for (gptr = MEM_TEST_GPA + guest_page_size / 2;
+	     gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += guest_page_size)
 		*(uint64_t *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2;
 
 	host_perform_sync(sync);
 
 	for (gptr = MEM_TEST_GPA;
-	     gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += 4096) {
+	     gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += guest_page_size) {
 		uint64_t *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL);
 		uint64_t val = *vptr;
 
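The unmap chunk is likewise derived rather than fixed: madvise(MADV_DONTNEED)
works at host page granularity, so when guest pages are smaller than host
pages the plain unmap loop widens its chunk to cover one host page. With
hypothetical 64 KiB host and 4 KiB guest pages:

	guest_chunk_pages = 0x10000 / 0x1000;   /* plain loop: 16 pages/chunk */
	guest_chunk_pages = SZ_2M / 0x1000;     /* chunked loop: 512 pages/chunk */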
@@ -749,7 +792,7 @@ static bool test_execute(int nslots, uint64_t *maxslots,
 			 struct timespec *slot_runtime,
 			 struct timespec *guest_runtime)
 {
-	uint64_t mem_size = tdata->mem_size ? : MEM_SIZE_PAGES;
+	uint64_t mem_size = tdata->mem_size ? : MEM_SIZE;
 	struct vm_data *data;
 	struct sync_area *sync;
 	struct timespec tstart;
@@ -764,6 +807,7 @@ static bool test_execute(int nslots, uint64_t *maxslots,
 
 	sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
 
+	sync->guest_page_size = data->vm->page_size;
 	if (tdata->prepare &&
 	    !tdata->prepare(data, sync, maxslots)) {
 		ret = false;
@@ -797,19 +841,19 @@ static bool test_execute(int nslots, uint64_t *maxslots,
 static const struct test_data tests[] = {
 	{
 		.name = "map",
-		.mem_size = MEM_SIZE_MAP_PAGES,
+		.mem_size = MEM_SIZE_MAP,
 		.guest_code = guest_code_test_memslot_map,
 		.loop = test_memslot_map_loop,
 	},
 	{
 		.name = "unmap",
-		.mem_size = MEM_TEST_UNMAP_SIZE_PAGES + 1,
+		.mem_size = MEM_TEST_UNMAP_SIZE + MEM_EXTRA_SIZE,
 		.guest_code = guest_code_test_memslot_unmap,
 		.loop = test_memslot_unmap_loop,
 	},
 	{
 		.name = "unmap chunked",
-		.mem_size = MEM_TEST_UNMAP_SIZE_PAGES + 1,
+		.mem_size = MEM_TEST_UNMAP_SIZE + MEM_EXTRA_SIZE,
 		.guest_code = guest_code_test_memslot_unmap,
 		.loop = test_memslot_unmap_loop_chunked,
 	},
@@ -867,9 +911,46 @@ static void help(char *name, struct test_args *targs)
		pr_info("%d: %s\n", ctr, tests[ctr].name);
}

static bool check_memory_sizes(void)
{
	uint32_t host_page_size = getpagesize();
	uint32_t guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;

	if (host_page_size > SZ_64K || guest_page_size > SZ_64K) {
		pr_info("Unsupported page size on host (0x%x) or guest (0x%x)\n",
			host_page_size, guest_page_size);
		return false;
	}

	if (MEM_SIZE % guest_page_size ||
	    MEM_TEST_SIZE % guest_page_size) {
		pr_info("invalid MEM_SIZE or MEM_TEST_SIZE\n");
		return false;
	}

	if (MEM_SIZE_MAP % guest_page_size		||
	    MEM_TEST_MAP_SIZE % guest_page_size		||
	    (MEM_TEST_MAP_SIZE / guest_page_size) <= 2	||
	    (MEM_TEST_MAP_SIZE / guest_page_size) % 2) {
		pr_info("invalid MEM_SIZE_MAP or MEM_TEST_MAP_SIZE\n");
		return false;
	}

	if (MEM_TEST_UNMAP_SIZE > MEM_TEST_SIZE		||
	    MEM_TEST_UNMAP_SIZE % guest_page_size	||
	    (MEM_TEST_UNMAP_SIZE / guest_page_size) %
	    (2 * MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size)) {
		pr_info("invalid MEM_TEST_UNMAP_SIZE or MEM_TEST_UNMAP_CHUNK_SIZE\n");
		return false;
	}

	return true;
}

static bool parse_args(int argc, char *argv[],
		       struct test_args *targs)
{
	uint32_t max_mem_slots;
	int opt;

	while ((opt = getopt(argc, argv, "hvds:f:e:l:r:")) != -1) {
@@ -886,8 +967,8 @@ static bool parse_args(int argc, char *argv[],
 			break;
 		case 's':
 			targs->nslots = atoi(optarg);
-			if (targs->nslots <= 0 && targs->nslots != -1) {
-				pr_info("Slot count cap has to be positive or -1 for no cap\n");
+			if (targs->nslots <= 1 && targs->nslots != -1) {
+				pr_info("Slot count cap must be larger than 1 or -1 for no cap\n");
 				return false;
 			}
 			break;
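
check_memory_sizes() re-validates at run time what the deleted static_asserts
checked at compile time, now against the actual guest page size. Working the
unmap constraint through with assumed 64 KiB guest pages:

	/* MEM_TEST_UNMAP_SIZE / guest_page_size = SZ_128M / SZ_64K = 2048,
	 * 2 * (MEM_TEST_UNMAP_CHUNK_SIZE / guest_page_size) = 2 * 32 = 64,
	 * and 2048 % 64 == 0, so the unmap tests are allowed to run. */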
@@ -933,6 +1014,21 @@ static bool parse_args(int argc, char *argv[],
 		return false;
 	}
 
+	max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
+	if (max_mem_slots <= 1) {
+		pr_info("KVM_CAP_NR_MEMSLOTS should be greater than 1\n");
+		return false;
+	}
+
+	/* Memory slot 0 is reserved */
+	if (targs->nslots == -1)
+		targs->nslots = max_mem_slots - 1;
+	else
+		targs->nslots = min_t(int, targs->nslots, max_mem_slots) - 1;
+
+	pr_info_v("Allowed Number of memory slots: %"PRIu32"\n",
+		  targs->nslots + 1);
+
 	return true;
 }
 
@@ -1010,6 +1106,9 @@ int main(int argc, char *argv[])
 	/* Tell stdout not to buffer its content */
 	setbuf(stdout, NULL);
 
+	if (!check_memory_sizes())
+		return -1;
+
 	if (!parse_args(argc, argv, &targs))
 		return -1;