Commit 3e06cdf1 authored by Anup Patel, committed by Anup Patel

KVM: selftests: Add initial support for RISC-V 64-bit



We add initial support for RISC-V 64-bit in KVM selftests, with which we can
cross-compile and run architecture-independent tests such as:
demand_paging_test
dirty_log_test
kvm_create_max_vcpus
kvm_page_table_test
set_memory_region_test
kvm_binary_stats_test

All VM guest modes defined in kvm_util.h require at least a 48-bit guest
virtual address, so to use the KVM RISC-V selftests the hardware needs to
support at least the Sv48 MMU for the guest (i.e. VS-mode).

Signed-off-by: Anup Patel <anup.patel@wdc.com>
Reviewed-and-tested-by: Atish Patra <atishp@rivosinc.com>
parent 788490e7
+12 −0
@@ -32,11 +32,16 @@ endif
ifeq ($(ARCH),s390)
	UNAME_M := s390x
endif
# Set UNAME_M so riscv compile/install works
ifeq ($(ARCH),riscv)
	UNAME_M := riscv
endif

LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/rbtree.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
LIBKVM_x86_64 = lib/x86_64/apic.c lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c lib/aarch64/handlers.S lib/aarch64/spinlock.c lib/aarch64/gic.c lib/aarch64/gic_v3.c lib/aarch64/vgic.c
LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c
LIBKVM_riscv = lib/riscv/processor.c lib/riscv/ucall.c

TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86_64 += x86_64/get_msr_index_features
@@ -119,6 +124,13 @@ TEST_GEN_PROGS_s390x += rseq_test
TEST_GEN_PROGS_s390x += set_memory_region_test
TEST_GEN_PROGS_s390x += kvm_binary_stats_test

TEST_GEN_PROGS_riscv += demand_paging_test
TEST_GEN_PROGS_riscv += dirty_log_test
TEST_GEN_PROGS_riscv += kvm_create_max_vcpus
TEST_GEN_PROGS_riscv += kvm_page_table_test
TEST_GEN_PROGS_riscv += set_memory_region_test
TEST_GEN_PROGS_riscv += kvm_binary_stats_test

TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
LIBKVM += $(LIBKVM_$(UNAME_M))

+10 −0
@@ -69,6 +69,16 @@ enum vm_guest_mode {
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT			VM_MODE_P40V48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)
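
/*
 * Illustrative note (not part of the original patch): the supported guest
 * modes all use 4K pages and 8-byte RISC-V PTEs, so ptes_per_page(4096)
 * evaluates to 512 entries per page-table page.
 */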

#endif

#define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
+135 −0
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * RISC-V processor specific defines
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */
#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include "kvm_util.h"
#include <linux/stringify.h>

static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t idx,
				    uint64_t  size)
{
	return KVM_REG_RISCV | type | idx | size;
}

#if __riscv_xlen == 64
#define KVM_REG_SIZE_ULONG	KVM_REG_SIZE_U64
#else
#define KVM_REG_SIZE_ULONG	KVM_REG_SIZE_U32
#endif

#define RISCV_CONFIG_REG(name)	__kvm_reg_id(KVM_REG_RISCV_CONFIG, \
					     KVM_REG_RISCV_CONFIG_REG(name), \
					     KVM_REG_SIZE_ULONG)

#define RISCV_CORE_REG(name)	__kvm_reg_id(KVM_REG_RISCV_CORE, \
					     KVM_REG_RISCV_CORE_REG(name), \
					     KVM_REG_SIZE_ULONG)

#define RISCV_CSR_REG(name)	__kvm_reg_id(KVM_REG_RISCV_CSR, \
					     KVM_REG_RISCV_CSR_REG(name), \
					     KVM_REG_SIZE_ULONG)

#define RISCV_TIMER_REG(name)	__kvm_reg_id(KVM_REG_RISCV_TIMER, \
					     KVM_REG_RISCV_TIMER_REG(name), \
					     KVM_REG_SIZE_U64)

static inline void get_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id,
			   unsigned long *addr)
{
	struct kvm_one_reg reg;

	reg.id = id;
	reg.addr = (unsigned long)addr;
	vcpu_get_reg(vm, vcpuid, &reg);
}

static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id,
			   unsigned long val)
{
	struct kvm_one_reg reg;

	reg.id = id;
	reg.addr = (unsigned long)&val;
	vcpu_set_reg(vm, vcpuid, &reg);
}
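
/*
 * Illustrative helper (not part of the original patch): a minimal sketch
 * showing how the one-reg helpers above are typically combined, here doing
 * a read-modify-write of vCPU 0's ISA config register.
 */
static inline void riscv_example_isa_roundtrip(struct kvm_vm *vm)
{
	unsigned long isa;

	/* Read the current ISA bitmap of vCPU 0. */
	get_reg(vm, 0, RISCV_CONFIG_REG(isa), &isa);
	/* A real test would check or mask extension bits before writing. */
	set_reg(vm, 0, RISCV_CONFIG_REG(isa), isa);
}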

/* L3 index Bit[47:39] */
#define PGTBL_L3_INDEX_MASK			0x0000FF8000000000ULL
#define PGTBL_L3_INDEX_SHIFT			39
#define PGTBL_L3_BLOCK_SHIFT			39
#define PGTBL_L3_BLOCK_SIZE			0x0000008000000000ULL
#define PGTBL_L3_MAP_MASK			(~(PGTBL_L3_BLOCK_SIZE - 1))
/* L2 index Bit[38:30] */
#define PGTBL_L2_INDEX_MASK			0x0000007FC0000000ULL
#define PGTBL_L2_INDEX_SHIFT			30
#define PGTBL_L2_BLOCK_SHIFT			30
#define PGTBL_L2_BLOCK_SIZE			0x0000000040000000ULL
#define PGTBL_L2_MAP_MASK			(~(PGTBL_L2_BLOCK_SIZE - 1))
/* L1 index Bit[29:21] */
#define PGTBL_L1_INDEX_MASK			0x000000003FE00000ULL
#define PGTBL_L1_INDEX_SHIFT			21
#define PGTBL_L1_BLOCK_SHIFT			21
#define PGTBL_L1_BLOCK_SIZE			0x0000000000200000ULL
#define PGTBL_L1_MAP_MASK			(~(PGTBL_L1_BLOCK_SIZE - 1))
/* L0 index Bit[20:12] */
#define PGTBL_L0_INDEX_MASK			0x00000000001FF000ULL
#define PGTBL_L0_INDEX_SHIFT			12
#define PGTBL_L0_BLOCK_SHIFT			12
#define PGTBL_L0_BLOCK_SIZE			0x0000000000001000ULL
#define PGTBL_L0_MAP_MASK			(~(PGTBL_L0_BLOCK_SIZE - 1))

#define PGTBL_PTE_ADDR_MASK			0x003FFFFFFFFFFC00ULL
#define PGTBL_PTE_ADDR_SHIFT			10
#define PGTBL_PTE_RSW_MASK			0x0000000000000300ULL
#define PGTBL_PTE_RSW_SHIFT			8
#define PGTBL_PTE_DIRTY_MASK			0x0000000000000080ULL
#define PGTBL_PTE_DIRTY_SHIFT			7
#define PGTBL_PTE_ACCESSED_MASK			0x0000000000000040ULL
#define PGTBL_PTE_ACCESSED_SHIFT		6
#define PGTBL_PTE_GLOBAL_MASK			0x0000000000000020ULL
#define PGTBL_PTE_GLOBAL_SHIFT			5
#define PGTBL_PTE_USER_MASK			0x0000000000000010ULL
#define PGTBL_PTE_USER_SHIFT			4
#define PGTBL_PTE_EXECUTE_MASK			0x0000000000000008ULL
#define PGTBL_PTE_EXECUTE_SHIFT			3
#define PGTBL_PTE_WRITE_MASK			0x0000000000000004ULL
#define PGTBL_PTE_WRITE_SHIFT			2
#define PGTBL_PTE_READ_MASK			0x0000000000000002ULL
#define PGTBL_PTE_READ_SHIFT			1
#define PGTBL_PTE_PERM_MASK			(PGTBL_PTE_EXECUTE_MASK | \
						 PGTBL_PTE_WRITE_MASK | \
						 PGTBL_PTE_READ_MASK)
#define PGTBL_PTE_VALID_MASK			0x0000000000000001ULL
#define PGTBL_PTE_VALID_SHIFT			0

#define PGTBL_PAGE_SIZE				PGTBL_L0_BLOCK_SIZE
#define PGTBL_PAGE_SIZE_SHIFT			PGTBL_L0_BLOCK_SHIFT
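
/*
 * Worked example (illustrative, not part of the original patch): for an
 * Sv48 walk of gva 0x0000004200345000 the index macros above yield
 *   L3 index = (gva & PGTBL_L3_INDEX_MASK) >> PGTBL_L3_INDEX_SHIFT = 0x000
 *   L2 index = (gva & PGTBL_L2_INDEX_MASK) >> PGTBL_L2_INDEX_SHIFT = 0x108
 *   L1 index = (gva & PGTBL_L1_INDEX_MASK) >> PGTBL_L1_INDEX_SHIFT = 0x001
 *   L0 index = (gva & PGTBL_L0_INDEX_MASK) >> PGTBL_L0_INDEX_SHIFT = 0x145
 * i.e. each level selects one of 512 8-byte entries in a 4K table.
 */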

#define SATP_PPN				_AC(0x00000FFFFFFFFFFF, UL)
#define SATP_MODE_39				_AC(0x8000000000000000, UL)
#define SATP_MODE_48				_AC(0x9000000000000000, UL)
#define SATP_ASID_BITS				16
#define SATP_ASID_SHIFT				44
#define SATP_ASID_MASK				_AC(0xFFFF, UL)

#define SBI_EXT_EXPERIMENTAL_START	0x08000000
#define SBI_EXT_EXPERIMENTAL_END	0x08FFFFFF

#define KVM_RISCV_SELFTESTS_SBI_EXT	SBI_EXT_EXPERIMENTAL_END

struct sbiret {
	long error;
	long value;
};

struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
			unsigned long arg1, unsigned long arg2,
			unsigned long arg3, unsigned long arg4,
			unsigned long arg5);
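
/*
 * Illustrative wrapper (not part of the original patch): a guest can signal
 * the host through the experimental SBI extension reserved above; the actual
 * calling convention used by the selftests lives in lib/riscv/ucall.c, so
 * treat the fid/argument layout below as an assumption.
 */
static inline struct sbiret kvm_selftests_sbi_ping(unsigned long arg)
{
	return sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT, 0, arg, 0, 0, 0, 0, 0);
}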

#endif /* SELFTEST_KVM_PROCESSOR_H */
+10 −0
@@ -38,6 +38,16 @@ void guest_modes_append_default(void)
			guest_mode_append(VM_MODE_P47V64_4K, true, true);
	}
#endif
#ifdef __riscv
	{
		unsigned int sz = kvm_check_cap(KVM_CAP_VM_GPA_BITS);

		if (sz >= 52)
			guest_mode_append(VM_MODE_P52V48_4K, true, true);
		if (sz >= 48)
			guest_mode_append(VM_MODE_P48V48_4K, true, true);
	}
#endif
}

void for_each_guest_mode(void (*func)(enum vm_guest_mode, void *), void *arg)
+362 −0
// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V code
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */

#include <linux/compiler.h>
#include <assert.h>

#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"

#define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN	0xac0000

static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
	return (v + vm->page_size) & ~(vm->page_size - 1);
}

static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
	return ((entry & PGTBL_PTE_ADDR_MASK) >> PGTBL_PTE_ADDR_SHIFT) <<
		PGTBL_PAGE_SIZE_SHIFT;
}

static uint64_t ptrs_per_pte(struct kvm_vm *vm)
{
	return PGTBL_PAGE_SIZE / sizeof(uint64_t);
}

static uint64_t pte_index_mask[] = {
	PGTBL_L0_INDEX_MASK,
	PGTBL_L1_INDEX_MASK,
	PGTBL_L2_INDEX_MASK,
	PGTBL_L3_INDEX_MASK,
};

static uint32_t pte_index_shift[] = {
	PGTBL_L0_INDEX_SHIFT,
	PGTBL_L1_INDEX_SHIFT,
	PGTBL_L2_INDEX_SHIFT,
	PGTBL_L3_INDEX_SHIFT,
};

static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level)
{
	TEST_ASSERT(level > -1,
		"Negative page table level (%d) not possible", level);
	TEST_ASSERT(level < vm->pgtable_levels,
		"Invalid page table level (%d)", level);

	return (gva & pte_index_mask[level]) >> pte_index_shift[level];
}

void virt_pgd_alloc(struct kvm_vm *vm)
{
	if (!vm->pgd_created) {
		vm_paddr_t paddr = vm_phy_pages_alloc(vm,
			page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size,
			KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
		vm->pgd = paddr;
		vm->pgd_created = true;
	}
}

void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	uint64_t *ptep, next_ppn;
	int level = vm->pgtable_levels - 1;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(vaddr >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		paddr, vm->max_gfn, vm->page_size);

	ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, vaddr, level) * 8;
	if (!*ptep) {
		next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT;
		*ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) |
			PGTBL_PTE_VALID_MASK;
	}
	level--;

	while (level > -1) {
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
		       pte_index(vm, vaddr, level) * 8;
		if (!*ptep && level > 0) {
			next_ppn = vm_alloc_page_table(vm) >>
				   PGTBL_PAGE_SIZE_SHIFT;
			*ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) |
				PGTBL_PTE_VALID_MASK;
		}
		level--;
	}

	paddr = paddr >> PGTBL_PAGE_SIZE_SHIFT;
	*ptep = (paddr << PGTBL_PTE_ADDR_SHIFT) |
		PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK;
}
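
/*
 * Usage sketch (illustrative, not part of the original patch): tests map a
 * page-aligned gva to a gpa and can translate it back with addr_gva2gpa()
 * below, e.g.:
 *
 *	virt_pg_map(vm, gva, gpa);
 *	TEST_ASSERT(addr_gva2gpa(vm, gva) == gpa, "unexpected translation");
 */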

vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;
	int level = vm->pgtable_levels - 1;

	if (!vm->pgd_created)
		goto unmapped_gva;

	ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, gva, level) * 8;
	if (!*ptep)
		goto unmapped_gva;
	level--;

	while (level > -1) {
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) +
		       pte_index(vm, gva, level) * 8;
		if (!*ptep)
			goto unmapped_gva;
		level--;
	}

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address gva: 0x%lx level: %d",
		  gva, level);
	exit(1);
}

static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent,
		     uint64_t page, int level)
{
#ifdef DEBUG
	static const char *const type[] = { "pte", "pmd", "pud", "p4d"};
	uint64_t pte, *ptep;

	if (level < 0)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (!*ptep)
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "",
			type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1,
			 pte_addr(vm, *ptep), level - 1);
	}
#endif
}

void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level = vm->pgtable_levels - 1;
	uint64_t pgd, *ptep;

	if (!vm->pgd_created)
		return;

	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pte(vm) * 8; pgd += 8) {
		ptep = addr_gpa2hva(vm, pgd);
		if (!*ptep)
			continue;
		fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "",
			pgd, *ptep, ptep);
		pte_dump(stream, vm, indent + 1,
			 pte_addr(vm, *ptep), level - 1);
	}
}

void riscv_vcpu_mmu_setup(struct kvm_vm *vm, int vcpuid)
{
	unsigned long satp;

	/*
	 * The RISC-V Sv48 MMU mode supports a 56-bit physical address
	 * for a 48-bit virtual address with a 4KB last-level page size.
	 */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
	case VM_MODE_P48V48_4K:
	case VM_MODE_P40V48_4K:
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	satp = (vm->pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN;
	satp |= SATP_MODE_48;

	set_reg(vm, vcpuid, RISCV_CSR_REG(satp), satp);
}
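
/*
 * Note (illustrative, not part of the original patch): the satp value built
 * above places the root page-table PPN (vm->pgd >> 12) in bits 43:0 and the
 * translation mode in bits 63:60; SATP_MODE_48 (0x9 << 60) selects Sv48.
 */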

void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
	struct kvm_riscv_core core;

	get_reg(vm, vcpuid, RISCV_CORE_REG(mode), &core.mode);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.pc), &core.regs.pc);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.ra), &core.regs.ra);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.sp), &core.regs.sp);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.gp), &core.regs.gp);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.tp), &core.regs.tp);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t0), &core.regs.t0);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t1), &core.regs.t1);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t2), &core.regs.t2);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s0), &core.regs.s0);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s1), &core.regs.s1);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a0), &core.regs.a0);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a1), &core.regs.a1);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a2), &core.regs.a2);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a3), &core.regs.a3);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a4), &core.regs.a4);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a5), &core.regs.a5);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a6), &core.regs.a6);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.a7), &core.regs.a7);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s2), &core.regs.s2);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s3), &core.regs.s3);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s4), &core.regs.s4);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s5), &core.regs.s5);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s6), &core.regs.s6);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s7), &core.regs.s7);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s8), &core.regs.s8);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s9), &core.regs.s9);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s10), &core.regs.s10);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.s11), &core.regs.s11);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t3), &core.regs.t3);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t4), &core.regs.t4);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t5), &core.regs.t5);
	get_reg(vm, vcpuid, RISCV_CORE_REG(regs.t6), &core.regs.t6);

	fprintf(stream,
		" MODE:  0x%lx\n", core.mode);
	fprintf(stream,
		" PC: 0x%016lx   RA: 0x%016lx SP: 0x%016lx GP: 0x%016lx\n",
		core.regs.pc, core.regs.ra, core.regs.sp, core.regs.gp);
	fprintf(stream,
		" TP: 0x%016lx   T0: 0x%016lx T1: 0x%016lx T2: 0x%016lx\n",
		core.regs.tp, core.regs.t0, core.regs.t1, core.regs.t2);
	fprintf(stream,
		" S0: 0x%016lx   S1: 0x%016lx A0: 0x%016lx A1: 0x%016lx\n",
		core.regs.s0, core.regs.s1, core.regs.a0, core.regs.a1);
	fprintf(stream,
		" A2: 0x%016lx   A3: 0x%016lx A4: 0x%016lx A5: 0x%016lx\n",
		core.regs.a2, core.regs.a3, core.regs.a4, core.regs.a5);
	fprintf(stream,
		" A6: 0x%016lx   A7: 0x%016lx S2: 0x%016lx S3: 0x%016lx\n",
		core.regs.a6, core.regs.a7, core.regs.s2, core.regs.s3);
	fprintf(stream,
		" S4: 0x%016lx   S5: 0x%016lx S6: 0x%016lx S7: 0x%016lx\n",
		core.regs.s4, core.regs.s5, core.regs.s6, core.regs.s7);
	fprintf(stream,
		" S8: 0x%016lx   S9: 0x%016lx S10: 0x%016lx S11: 0x%016lx\n",
		core.regs.s8, core.regs.s9, core.regs.s10, core.regs.s11);
	fprintf(stream,
		" T3: 0x%016lx   T4: 0x%016lx T5: 0x%016lx T6: 0x%016lx\n",
		core.regs.t3, core.regs.t4, core.regs.t5, core.regs.t6);
}

static void guest_hang(void)
{
	while (1)
		;
}

void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
	int r;
	size_t stack_size = vm->page_size == 4096 ?
					DEFAULT_STACK_PGS * vm->page_size :
					vm->page_size;
	unsigned long stack_vaddr = vm_vaddr_alloc(vm, stack_size,
					DEFAULT_RISCV_GUEST_STACK_VADDR_MIN);
	unsigned long current_gp = 0;
	struct kvm_mp_state mps;

	vm_vcpu_add(vm, vcpuid);
	riscv_vcpu_mmu_setup(vm, vcpuid);

	/*
	 * With SBI HSM support in KVM RISC-V, all secondary VCPUs are
	 * powered-off by default so we ensure that all secondary VCPUs
	 * are powered-on using KVM_SET_MP_STATE ioctl().
	 */
	mps.mp_state = KVM_MP_STATE_RUNNABLE;
	r = _vcpu_ioctl(vm, vcpuid, KVM_SET_MP_STATE, &mps);
	TEST_ASSERT(!r, "IOCTL KVM_SET_MP_STATE failed (error %d)", r);

	/* Setup global pointer of guest to be same as the host */
	asm volatile (
		"add %0, gp, zero" : "=r" (current_gp) : : "memory");
	set_reg(vm, vcpuid, RISCV_CORE_REG(regs.gp), current_gp);

	/* Setup stack pointer and program counter of guest */
	set_reg(vm, vcpuid, RISCV_CORE_REG(regs.sp),
		stack_vaddr + stack_size);
	set_reg(vm, vcpuid, RISCV_CORE_REG(regs.pc),
		(unsigned long)guest_code);

	/* Setup default exception vector of guest */
	set_reg(vm, vcpuid, RISCV_CSR_REG(stvec),
		(unsigned long)guest_hang);
}

void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
{
	va_list ap;
	uint64_t id = RISCV_CORE_REG(regs.a0);
	int i;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    "  num: %u\n", num);

	va_start(ap, num);

	for (i = 0; i < num; i++) {
		switch (i) {
		case 0:
			id = RISCV_CORE_REG(regs.a0);
			break;
		case 1:
			id = RISCV_CORE_REG(regs.a1);
			break;
		case 2:
			id = RISCV_CORE_REG(regs.a2);
			break;
		case 3:
			id = RISCV_CORE_REG(regs.a3);
			break;
		case 4:
			id = RISCV_CORE_REG(regs.a4);
			break;
		case 5:
			id = RISCV_CORE_REG(regs.a5);
			break;
		case 6:
			id = RISCV_CORE_REG(regs.a6);
			break;
		case 7:
			id = RISCV_CORE_REG(regs.a7);
			break;
		}
		set_reg(vm, vcpuid, id, va_arg(ap, uint64_t));
	}

	va_end(ap);
}
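
/*
 * Usage sketch (illustrative, not part of the original patch): a typical
 * test sets up a vCPU and passes its guest arguments in a0-a7, e.g.:
 *
 *	vm_vcpu_add_default(vm, 0, guest_code);
 *	vcpu_args_set(vm, 0, 2, (uint64_t)arg0, (uint64_t)arg1);
 */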

void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
{
}