Commit 5a2acbbb authored by Marc Zyngier
Browse files

Merge branch kvm/selftests/memslot into kvmarm-master/next



* kvm/selftests/memslot:
  : .
  : Enable KVM memslot selftests on arm64, making them less
  : x86 specific.
  : .
  KVM: selftests: Build the memslot tests for arm64
  KVM: selftests: Make memslot_perf_test arch independent

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents be08c3cf 358928fd
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -96,6 +96,8 @@ TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
TEST_GEN_PROGS_aarch64 += kvm_page_table_test
TEST_GEN_PROGS_aarch64 += memslot_modification_stress_test
TEST_GEN_PROGS_aarch64 += memslot_perf_test
TEST_GEN_PROGS_aarch64 += rseq_test
TEST_GEN_PROGS_aarch64 += set_memory_region_test
TEST_GEN_PROGS_aarch64 += steal_time
+34 −22
Original line number Diff line number Diff line
@@ -127,43 +127,54 @@ static bool verbose;
			pr_info(__VA_ARGS__);	\
	} while (0)

static void check_mmio_access(struct vm_data *vm, struct kvm_run *run)
{
	TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit");
	TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read");
	TEST_ASSERT(run->mmio.len == 8,
		    "Unexpected exit mmio size = %u", run->mmio.len);
	TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min &&
		    run->mmio.phys_addr <= vm->mmio_gpa_max,
		    "Unexpected exit mmio address = 0x%llx",
		    run->mmio.phys_addr);
}

static void *vcpu_worker(void *data)
{
	struct vm_data *vm = data;
	struct kvm_run *run;
	struct ucall uc;
	uint64_t cmd;

	run = vcpu_state(vm->vm, VCPU_ID);
	while (1) {
		vcpu_run(vm->vm, VCPU_ID);

		if (run->exit_reason == KVM_EXIT_IO) {
			cmd = get_ucall(vm->vm, VCPU_ID, &uc);
			if (cmd != UCALL_SYNC)
				break;

		switch (get_ucall(vm->vm, VCPU_ID, &uc)) {
		case UCALL_SYNC:
			TEST_ASSERT(uc.args[1] == 0,
				"Unexpected sync ucall, got %lx",
				(ulong)uc.args[1]);
			sem_post(&vcpu_ready);
			continue;
		}

		if (run->exit_reason != KVM_EXIT_MMIO)
		case UCALL_NONE:
			if (run->exit_reason == KVM_EXIT_MMIO)
				check_mmio_access(vm, run);
			else
				goto done;
			break;

		TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit");
		TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read");
		TEST_ASSERT(run->mmio.len == 8,
			    "Unexpected exit mmio size = %u", run->mmio.len);
		TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min &&
			    run->mmio.phys_addr <= vm->mmio_gpa_max,
			    "Unexpected exit mmio address = 0x%llx",
			    run->mmio.phys_addr);
	}

	if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
		TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0],
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld, val = %lu",
					(const char *)uc.args[0],
					__FILE__, uc.args[1], uc.args[2]);
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

done:
	return NULL;
}

@@ -268,6 +279,7 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
	TEST_ASSERT(data->hva_slots, "malloc() fail");

	data->vm = vm_create_default(VCPU_ID, mempages, guest_code);
	ucall_init(data->vm, NULL);

	pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
		max_mem_slots - 1, data->pages_per_slot, rempages);