Commit 7812d80c authored by David Matlack's avatar David Matlack Committed by Sean Christopherson
Browse files

KVM: selftests: Rename perf_test_util symbols to memstress



Replace the perf_test_ prefix on symbol names with memstress_ to match
the new file name.

"memstress" better describes the functionality provided by this library,
which is to provide functionality for creating and running a VM that
stresses VM memory by reading and writing to guest memory on all vCPUs
in parallel.

"memstress" also contains the same number of characters as "perf_test",
making it a drop-in replacement in symbols, e.g. function names, without
impacting line lengths. Also the lack of underscore between "mem" and
"stress" makes it clear "memstress" is a noun.

Signed-off-by: David Matlack <dmatlack@google.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20221012165729.3505266-4-dmatlack@google.com


Signed-off-by: Sean Christopherson <seanjc@google.com>
parent a008a335
Loading
Loading
Loading
Loading
+9 −9
Original line number Diff line number Diff line
@@ -126,7 +126,7 @@ static void mark_page_idle(int page_idle_fd, uint64_t pfn)
}

static void mark_vcpu_memory_idle(struct kvm_vm *vm,
				  struct perf_test_vcpu_args *vcpu_args)
				  struct memstress_vcpu_args *vcpu_args)
{
	int vcpu_idx = vcpu_args->vcpu_idx;
	uint64_t base_gva = vcpu_args->gva;
@@ -148,7 +148,7 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm,
	TEST_ASSERT(pagemap_fd > 0, "Failed to open pagemap.");

	for (page = 0; page < pages; page++) {
		uint64_t gva = base_gva + page * perf_test_args.guest_page_size;
		uint64_t gva = base_gva + page * memstress_args.guest_page_size;
		uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);

		if (!pfn) {
@@ -220,10 +220,10 @@ static bool spin_wait_for_next_iteration(int *current_iteration)
	return true;
}

static void vcpu_thread_main(struct perf_test_vcpu_args *vcpu_args)
static void vcpu_thread_main(struct memstress_vcpu_args *vcpu_args)
{
	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
	struct kvm_vm *vm = perf_test_args.vm;
	struct kvm_vm *vm = memstress_args.vm;
	int vcpu_idx = vcpu_args->vcpu_idx;
	int current_iteration = 0;

@@ -279,7 +279,7 @@ static void run_iteration(struct kvm_vm *vm, int nr_vcpus, const char *descripti
static void access_memory(struct kvm_vm *vm, int nr_vcpus,
			  enum access_type access, const char *description)
{
	perf_test_set_write_percent(vm, (access == ACCESS_READ) ? 0 : 100);
	memstress_set_write_percent(vm, (access == ACCESS_READ) ? 0 : 100);
	iteration_work = ITERATION_ACCESS_MEMORY;
	run_iteration(vm, nr_vcpus, description);
}
@@ -303,10 +303,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
	struct kvm_vm *vm;
	int nr_vcpus = params->nr_vcpus;

	vm = perf_test_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
	vm = memstress_create_vm(mode, nr_vcpus, params->vcpu_memory_bytes, 1,
				 params->backing_src, !overlap_memory_access);

	perf_test_start_vcpu_threads(nr_vcpus, vcpu_thread_main);
	memstress_start_vcpu_threads(nr_vcpus, vcpu_thread_main);

	pr_info("\n");
	access_memory(vm, nr_vcpus, ACCESS_WRITE, "Populating memory");
@@ -324,8 +324,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
	/* Set done to signal the vCPU threads to exit */
	done = true;

	perf_test_join_vcpu_threads(nr_vcpus);
	perf_test_destroy_vm(vm);
	memstress_join_vcpu_threads(nr_vcpus);
	memstress_destroy_vm(vm);
}

static void help(char *name)
+9 −9
Original line number Diff line number Diff line
@@ -42,7 +42,7 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
static size_t demand_paging_size;
static char *guest_data_prototype;

static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
	int vcpu_idx = vcpu_args->vcpu_idx;
@@ -285,7 +285,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
	struct kvm_vm *vm;
	int r, i;

	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
	vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size, 1,
				 p->src_type, p->partition_vcpu_memory_access);

	demand_paging_size = get_backing_src_pagesz(p->src_type);
@@ -307,11 +307,11 @@ static void run_test(enum vm_guest_mode mode, void *arg)
		TEST_ASSERT(pipefds, "Unable to allocate memory for pipefd");

		for (i = 0; i < nr_vcpus; i++) {
			struct perf_test_vcpu_args *vcpu_args;
			struct memstress_vcpu_args *vcpu_args;
			void *vcpu_hva;
			void *vcpu_alias;

			vcpu_args = &perf_test_args.vcpu_args[i];
			vcpu_args = &memstress_args.vcpu_args[i];

			/* Cache the host addresses of the region */
			vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
@@ -329,17 +329,17 @@ static void run_test(enum vm_guest_mode mode, void *arg)
					    pipefds[i * 2], p->uffd_mode,
					    p->uffd_delay, &uffd_args[i],
					    vcpu_hva, vcpu_alias,
					    vcpu_args->pages * perf_test_args.guest_page_size);
					    vcpu_args->pages * memstress_args.guest_page_size);
		}
	}

	pr_info("Finished creating vCPUs and starting uffd threads\n");

	clock_gettime(CLOCK_MONOTONIC, &start);
	perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
	memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);
	pr_info("Started all vCPUs\n");

	perf_test_join_vcpu_threads(nr_vcpus);
	memstress_join_vcpu_threads(nr_vcpus);
	ts_diff = timespec_elapsed(start);
	pr_info("All vCPU threads joined\n");

@@ -358,10 +358,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
	pr_info("Total guest execution time: %ld.%.9lds\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);
	pr_info("Overall demand paging rate: %f pgs/sec\n",
		perf_test_args.vcpu_args[0].pages * nr_vcpus /
		memstress_args.vcpu_args[0].pages * nr_vcpus /
		((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0));

	perf_test_destroy_vm(vm);
	memstress_destroy_vm(vm);

	free(guest_data_prototype);
	if (p->uffd_mode) {
+17 −17
Original line number Diff line number Diff line
@@ -67,7 +67,7 @@ static bool host_quit;
static int iteration;
static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];

static void vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
{
	struct kvm_vcpu *vcpu = vcpu_args->vcpu;
	int vcpu_idx = vcpu_args->vcpu_idx;
@@ -141,7 +141,7 @@ static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
		int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;

		vm_mem_region_set_flags(vm, slot, flags);
@@ -163,7 +163,7 @@ static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;

		kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
	}
@@ -175,7 +175,7 @@ static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
	int i;

	for (i = 0; i < slots; i++) {
		int slot = PERF_TEST_MEM_SLOT_INDEX + i;
		int slot = MEMSTRESS_MEM_SLOT_INDEX + i;

		kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
	}
@@ -223,13 +223,13 @@ static void run_test(enum vm_guest_mode mode, void *arg)
	struct timespec clear_dirty_log_total = (struct timespec){0};
	int i;

	vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
	vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
				 p->slots, p->backing_src,
				 p->partition_vcpu_memory_access);

	pr_info("Random seed: %u\n", p->random_seed);
	perf_test_set_random_seed(vm, p->random_seed);
	perf_test_set_write_percent(vm, p->write_percent);
	memstress_set_random_seed(vm, p->random_seed);
	memstress_set_write_percent(vm, p->write_percent);

	guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm->page_shift;
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
@@ -259,9 +259,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
	 * occurring during the dirty memory iterations below, which
	 * would pollute the performance results.
	 */
	perf_test_set_write_percent(vm, 100);
	perf_test_set_random_access(vm, false);
	perf_test_start_vcpu_threads(nr_vcpus, vcpu_worker);
	memstress_set_write_percent(vm, 100);
	memstress_set_random_access(vm, false);
	memstress_start_vcpu_threads(nr_vcpus, vcpu_worker);

	/* Allow the vCPUs to populate memory */
	pr_debug("Starting iteration %d - Populating\n", iteration);
@@ -282,8 +282,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
	pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
		ts_diff.tv_sec, ts_diff.tv_nsec);

	perf_test_set_write_percent(vm, p->write_percent);
	perf_test_set_random_access(vm, p->random_access);
	memstress_set_write_percent(vm, p->write_percent);
	memstress_set_random_access(vm, p->random_access);

	while (iteration < p->iterations) {
		/*
@@ -345,7 +345,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
	 * wait for them to exit.
	 */
	host_quit = true;
	perf_test_join_vcpu_threads(nr_vcpus);
	memstress_join_vcpu_threads(nr_vcpus);

	avg = timespec_div(get_dirty_log_total, p->iterations);
	pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
@@ -361,7 +361,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)

	free_bitmaps(bitmaps, p->slots);
	arch_cleanup_vm(vm);
	perf_test_destroy_vm(vm);
	memstress_destroy_vm(vm);
}

static void help(char *name)
@@ -466,7 +466,7 @@ int main(int argc, char *argv[])
			guest_modes_cmdline(optarg);
			break;
		case 'n':
			perf_test_args.nested = true;
			memstress_args.nested = true;
			break;
		case 'o':
			p.partition_vcpu_memory_access = false;
@@ -500,9 +500,9 @@ int main(int argc, char *argv[])
	}

	if (pcpu_list) {
		kvm_parse_vcpu_pinning(pcpu_list, perf_test_args.vcpu_to_pcpu,
		kvm_parse_vcpu_pinning(pcpu_list, memstress_args.vcpu_to_pcpu,
				       nr_vcpus);
		perf_test_args.pin_vcpus = true;
		memstress_args.pin_vcpus = true;
	}

	TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");
+15 −15
Original line number Diff line number Diff line
@@ -17,9 +17,9 @@

#define DEFAULT_PER_VCPU_MEM_SIZE	(1 << 30) /* 1G */

#define PERF_TEST_MEM_SLOT_INDEX	1
#define MEMSTRESS_MEM_SLOT_INDEX	1

struct perf_test_vcpu_args {
struct memstress_vcpu_args {
	uint64_t gpa;
	uint64_t gva;
	uint64_t pages;
@@ -29,7 +29,7 @@ struct perf_test_vcpu_args {
	int vcpu_idx;
};

struct perf_test_args {
struct memstress_args {
	struct kvm_vm *vm;
	/* The starting address and size of the guest test region. */
	uint64_t gpa;
@@ -47,26 +47,26 @@ struct perf_test_args {
	/* The vCPU=>pCPU pinning map. Only valid if pin_vcpus is true. */
	uint32_t vcpu_to_pcpu[KVM_MAX_VCPUS];

	struct perf_test_vcpu_args vcpu_args[KVM_MAX_VCPUS];
	struct memstress_vcpu_args vcpu_args[KVM_MAX_VCPUS];
};

extern struct perf_test_args perf_test_args;
extern struct memstress_args memstress_args;

struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
				   uint64_t vcpu_memory_bytes, int slots,
				   enum vm_mem_backing_src_type backing_src,
				   bool partition_vcpu_memory_access);
void perf_test_destroy_vm(struct kvm_vm *vm);
void memstress_destroy_vm(struct kvm_vm *vm);

void perf_test_set_write_percent(struct kvm_vm *vm, uint32_t write_percent);
void perf_test_set_random_seed(struct kvm_vm *vm, uint32_t random_seed);
void perf_test_set_random_access(struct kvm_vm *vm, bool random_access);
void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent);
void memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed);
void memstress_set_random_access(struct kvm_vm *vm, bool random_access);

void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *));
void perf_test_join_vcpu_threads(int vcpus);
void perf_test_guest_code(uint32_t vcpu_id);
void memstress_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct memstress_vcpu_args *));
void memstress_join_vcpu_threads(int vcpus);
void memstress_guest_code(uint32_t vcpu_id);

uint64_t perf_test_nested_pages(int nr_vcpus);
void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
uint64_t memstress_nested_pages(int nr_vcpus);
void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);

#endif /* SELFTEST_KVM_MEMSTRESS_H */
+34 −34
Original line number Diff line number Diff line
@@ -10,7 +10,7 @@
#include "memstress.h"
#include "processor.h"

struct perf_test_args perf_test_args;
struct memstress_args memstress_args;

/*
 * Guest virtual memory offset of the testing memory slot.
@@ -33,7 +33,7 @@ struct vcpu_thread {
static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS];

/* The function run by each vCPU thread, as provided by the test. */
static void (*vcpu_thread_fn)(struct perf_test_vcpu_args *);
static void (*vcpu_thread_fn)(struct memstress_vcpu_args *);

/* Set to true once all vCPU threads are up and running. */
static bool all_vcpu_threads_running;
@@ -44,10 +44,10 @@ static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
 * Continuously write to the first 8 bytes of each page in the
 * specified region.
 */
void perf_test_guest_code(uint32_t vcpu_idx)
void memstress_guest_code(uint32_t vcpu_idx)
{
	struct perf_test_args *args = &perf_test_args;
	struct perf_test_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx];
	struct memstress_args *args = &memstress_args;
	struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx];
	struct guest_random_state rand_state;
	uint64_t gva;
	uint64_t pages;
@@ -82,13 +82,13 @@ void perf_test_guest_code(uint32_t vcpu_idx)
	}
}

void perf_test_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
			   struct kvm_vcpu *vcpus[],
			   uint64_t vcpu_memory_bytes,
			   bool partition_vcpu_memory_access)
{
	struct perf_test_args *args = &perf_test_args;
	struct perf_test_vcpu_args *vcpu_args;
	struct memstress_args *args = &memstress_args;
	struct memstress_vcpu_args *vcpu_args;
	int i;

	for (i = 0; i < nr_vcpus; i++) {
@@ -118,12 +118,12 @@ void perf_test_setup_vcpus(struct kvm_vm *vm, int nr_vcpus,
	}
}

struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus,
				   uint64_t vcpu_memory_bytes, int slots,
				   enum vm_mem_backing_src_type backing_src,
				   bool partition_vcpu_memory_access)
{
	struct perf_test_args *args = &perf_test_args;
	struct memstress_args *args = &memstress_args;
	struct kvm_vm *vm;
	uint64_t guest_num_pages, slot0_pages = 0;
	uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
@@ -157,7 +157,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
	 * in-memory data structures.
	 */
	if (args->nested)
		slot0_pages += perf_test_nested_pages(nr_vcpus);
		slot0_pages += memstress_nested_pages(nr_vcpus);

	/*
	 * Pass guest_num_pages to populate the page tables for test memory.
@@ -165,7 +165,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
	 * effect as KVM allows aliasing HVAs in meslots.
	 */
	vm = __vm_create_with_vcpus(mode, nr_vcpus, slot0_pages + guest_num_pages,
				    perf_test_guest_code, vcpus);
				    memstress_guest_code, vcpus);

	args->vm = vm;

@@ -206,59 +206,59 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
		vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i;

		vm_userspace_mem_region_add(vm, backing_src, region_start,
					    PERF_TEST_MEM_SLOT_INDEX + i,
					    MEMSTRESS_MEM_SLOT_INDEX + i,
					    region_pages, 0);
	}

	/* Do mapping for the demand paging memory slot */
	virt_map(vm, guest_test_virt_mem, args->gpa, guest_num_pages);

	perf_test_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,
	memstress_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes,
			      partition_vcpu_memory_access);

	if (args->nested) {
		pr_info("Configuring vCPUs to run in L2 (nested).\n");
		perf_test_setup_nested(vm, nr_vcpus, vcpus);
		memstress_setup_nested(vm, nr_vcpus, vcpus);
	}

	ucall_init(vm, NULL);

	/* Export the shared variables to the guest. */
	sync_global_to_guest(vm, perf_test_args);
	sync_global_to_guest(vm, memstress_args);

	return vm;
}

void perf_test_destroy_vm(struct kvm_vm *vm)
void memstress_destroy_vm(struct kvm_vm *vm)
{
	ucall_uninit(vm);
	kvm_vm_free(vm);
}

void perf_test_set_write_percent(struct kvm_vm *vm, uint32_t write_percent)
void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent)
{
	perf_test_args.write_percent = write_percent;
	sync_global_to_guest(vm, perf_test_args.write_percent);
	memstress_args.write_percent = write_percent;
	sync_global_to_guest(vm, memstress_args.write_percent);
}

void perf_test_set_random_seed(struct kvm_vm *vm, uint32_t random_seed)
void memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed)
{
	perf_test_args.random_seed = random_seed;
	sync_global_to_guest(vm, perf_test_args.random_seed);
	memstress_args.random_seed = random_seed;
	sync_global_to_guest(vm, memstress_args.random_seed);
}

void perf_test_set_random_access(struct kvm_vm *vm, bool random_access)
void memstress_set_random_access(struct kvm_vm *vm, bool random_access)
{
	perf_test_args.random_access = random_access;
	sync_global_to_guest(vm, perf_test_args.random_access);
	memstress_args.random_access = random_access;
	sync_global_to_guest(vm, memstress_args.random_access);
}

uint64_t __weak perf_test_nested_pages(int nr_vcpus)
uint64_t __weak memstress_nested_pages(int nr_vcpus)
{
	return 0;
}

void __weak perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus)
void __weak memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus)
{
	pr_info("%s() not support on this architecture, skipping.\n", __func__);
	exit(KSFT_SKIP);
@@ -269,8 +269,8 @@ static void *vcpu_thread_main(void *data)
	struct vcpu_thread *vcpu = data;
	int vcpu_idx = vcpu->vcpu_idx;

	if (perf_test_args.pin_vcpus)
		kvm_pin_this_task_to_pcpu(perf_test_args.vcpu_to_pcpu[vcpu_idx]);
	if (memstress_args.pin_vcpus)
		kvm_pin_this_task_to_pcpu(memstress_args.vcpu_to_pcpu[vcpu_idx]);

	WRITE_ONCE(vcpu->running, true);

@@ -283,13 +283,13 @@ static void *vcpu_thread_main(void *data)
	while (!READ_ONCE(all_vcpu_threads_running))
		;

	vcpu_thread_fn(&perf_test_args.vcpu_args[vcpu_idx]);
	vcpu_thread_fn(&memstress_args.vcpu_args[vcpu_idx]);

	return NULL;
}

void perf_test_start_vcpu_threads(int nr_vcpus,
				  void (*vcpu_fn)(struct perf_test_vcpu_args *))
void memstress_start_vcpu_threads(int nr_vcpus,
				  void (*vcpu_fn)(struct memstress_vcpu_args *))
{
	int i;

@@ -313,7 +313,7 @@ void perf_test_start_vcpu_threads(int nr_vcpus,
	WRITE_ONCE(all_vcpu_threads_running, true);
}

void perf_test_join_vcpu_threads(int nr_vcpus)
void memstress_join_vcpu_threads(int nr_vcpus)
{
	int i;

Loading