Commit 26708234 authored by Anup Patel, committed by Anup Patel

RISC-V: KVM: Use G-stage name for hypervisor page table



The two-stage address translation defined by the RISC-V privileged
specification consists of: VS-stage (guest virtual address to guest
physical address) programmed by the guest OS, and G-stage (guest
physical address to host physical address) programmed by the
hypervisor.

To align with the above terminology, we replace "stage2" with "gstage"
and "Stage2" with "G-stage" everywhere in the KVM RISC-V sources.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
parent dba90d6f
arch/riscv/include/asm/kvm_host.h (+15 −15)
@@ -54,10 +54,10 @@ struct kvm_vmid {
};

struct kvm_arch {
	/* stage2 vmid */
	/* G-stage vmid */
	struct kvm_vmid vmid;

	/* stage2 page table */
	/* G-stage page table */
	pgd_t *pgd;
	phys_addr_t pgd_phys;

@@ -207,21 +207,21 @@ void __kvm_riscv_hfence_gvma_vmid(unsigned long vmid);
void __kvm_riscv_hfence_gvma_gpa(unsigned long gpa_divby_4);
void __kvm_riscv_hfence_gvma_all(void);

int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write);
int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm);
void kvm_riscv_stage2_free_pgd(struct kvm *kvm);
void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu);
void kvm_riscv_stage2_mode_detect(void);
unsigned long kvm_riscv_stage2_mode(void);
int kvm_riscv_stage2_gpa_bits(void);

void kvm_riscv_stage2_vmid_detect(void);
unsigned long kvm_riscv_stage2_vmid_bits(void);
int kvm_riscv_stage2_vmid_init(struct kvm *kvm);
bool kvm_riscv_stage2_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_stage2_vmid_update(struct kvm_vcpu *vcpu);
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
void kvm_riscv_gstage_mode_detect(void);
unsigned long kvm_riscv_gstage_mode(void);
int kvm_riscv_gstage_gpa_bits(void);

void kvm_riscv_gstage_vmid_detect(void);
unsigned long kvm_riscv_gstage_vmid_bits(void);
int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);

void __kvm_riscv_unpriv_trap(void);

arch/riscv/kvm/main.c (+4 −4)
@@ -89,13 +89,13 @@ int kvm_arch_init(void *opaque)
		return -ENODEV;
	}

	kvm_riscv_stage2_mode_detect();
	kvm_riscv_gstage_mode_detect();

	kvm_riscv_stage2_vmid_detect();
	kvm_riscv_gstage_vmid_detect();

	kvm_info("hypervisor extension available\n");

	switch (kvm_riscv_stage2_mode()) {
	switch (kvm_riscv_gstage_mode()) {
	case HGATP_MODE_SV32X4:
		str = "Sv32x4";
		break;
@@ -110,7 +110,7 @@ int kvm_arch_init(void *opaque)
	}
	kvm_info("using %s G-stage page table format\n", str);

	kvm_info("VMID %ld bits available\n", kvm_riscv_stage2_vmid_bits());
	kvm_info("VMID %ld bits available\n", kvm_riscv_gstage_vmid_bits());

	return 0;
}
arch/riscv/kvm/mmu.c (+111 −111)
@@ -21,50 +21,50 @@
#include <asm/sbi.h>

#ifdef CONFIG_64BIT
static unsigned long stage2_mode = (HGATP_MODE_SV39X4 << HGATP_MODE_SHIFT);
static unsigned long stage2_pgd_levels = 3;
#define stage2_index_bits	9
static unsigned long gstage_mode = (HGATP_MODE_SV39X4 << HGATP_MODE_SHIFT);
static unsigned long gstage_pgd_levels = 3;
#define gstage_index_bits	9
#else
static unsigned long stage2_mode = (HGATP_MODE_SV32X4 << HGATP_MODE_SHIFT);
static unsigned long stage2_pgd_levels = 2;
#define stage2_index_bits	10
static unsigned long gstage_mode = (HGATP_MODE_SV32X4 << HGATP_MODE_SHIFT);
static unsigned long gstage_pgd_levels = 2;
#define gstage_index_bits	10
#endif

#define stage2_pgd_xbits	2
#define stage2_pgd_size	(1UL << (HGATP_PAGE_SHIFT + stage2_pgd_xbits))
#define stage2_gpa_bits	(HGATP_PAGE_SHIFT + \
			 (stage2_pgd_levels * stage2_index_bits) + \
			 stage2_pgd_xbits)
#define stage2_gpa_size	((gpa_t)(1ULL << stage2_gpa_bits))
#define gstage_pgd_xbits	2
#define gstage_pgd_size	(1UL << (HGATP_PAGE_SHIFT + gstage_pgd_xbits))
#define gstage_gpa_bits	(HGATP_PAGE_SHIFT + \
			 (gstage_pgd_levels * gstage_index_bits) + \
			 gstage_pgd_xbits)
#define gstage_gpa_size	((gpa_t)(1ULL << gstage_gpa_bits))

#define stage2_pte_leaf(__ptep)	\
#define gstage_pte_leaf(__ptep)	\
	(pte_val(*(__ptep)) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC))

static inline unsigned long stage2_pte_index(gpa_t addr, u32 level)
static inline unsigned long gstage_pte_index(gpa_t addr, u32 level)
{
	unsigned long mask;
	unsigned long shift = HGATP_PAGE_SHIFT + (stage2_index_bits * level);
	unsigned long shift = HGATP_PAGE_SHIFT + (gstage_index_bits * level);

	if (level == (stage2_pgd_levels - 1))
		mask = (PTRS_PER_PTE * (1UL << stage2_pgd_xbits)) - 1;
	if (level == (gstage_pgd_levels - 1))
		mask = (PTRS_PER_PTE * (1UL << gstage_pgd_xbits)) - 1;
	else
		mask = PTRS_PER_PTE - 1;

	return (addr >> shift) & mask;
}

static inline unsigned long stage2_pte_page_vaddr(pte_t pte)
static inline unsigned long gstage_pte_page_vaddr(pte_t pte)
{
	return (unsigned long)pfn_to_virt(pte_val(pte) >> _PAGE_PFN_SHIFT);
}

static int stage2_page_size_to_level(unsigned long page_size, u32 *out_level)
static int gstage_page_size_to_level(unsigned long page_size, u32 *out_level)
{
	u32 i;
	unsigned long psz = 1UL << 12;

	for (i = 0; i < stage2_pgd_levels; i++) {
		if (page_size == (psz << (i * stage2_index_bits))) {
	for (i = 0; i < gstage_pgd_levels; i++) {
		if (page_size == (psz << (i * gstage_index_bits))) {
			*out_level = i;
			return 0;
		}
@@ -73,27 +73,27 @@ static int stage2_page_size_to_level(unsigned long page_size, u32 *out_level)
	return -EINVAL;
}

static int stage2_level_to_page_size(u32 level, unsigned long *out_pgsize)
static int gstage_level_to_page_size(u32 level, unsigned long *out_pgsize)
{
	if (stage2_pgd_levels < level)
	if (gstage_pgd_levels < level)
		return -EINVAL;

	*out_pgsize = 1UL << (12 + (level * stage2_index_bits));
	*out_pgsize = 1UL << (12 + (level * gstage_index_bits));

	return 0;
}

static bool stage2_get_leaf_entry(struct kvm *kvm, gpa_t addr,
static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
				  pte_t **ptepp, u32 *ptep_level)
{
	pte_t *ptep;
	u32 current_level = stage2_pgd_levels - 1;
	u32 current_level = gstage_pgd_levels - 1;

	*ptep_level = current_level;
	ptep = (pte_t *)kvm->arch.pgd;
	ptep = &ptep[stage2_pte_index(addr, current_level)];
	ptep = &ptep[gstage_pte_index(addr, current_level)];
	while (ptep && pte_val(*ptep)) {
		if (stage2_pte_leaf(ptep)) {
		if (gstage_pte_leaf(ptep)) {
			*ptep_level = current_level;
			*ptepp = ptep;
			return true;
@@ -102,8 +102,8 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, gpa_t addr,
		if (current_level) {
			current_level--;
			*ptep_level = current_level;
			ptep = (pte_t *)stage2_pte_page_vaddr(*ptep);
			ptep = &ptep[stage2_pte_index(addr, current_level)];
			ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
			ptep = &ptep[gstage_pte_index(addr, current_level)];
		} else {
			ptep = NULL;
		}
@@ -112,12 +112,12 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, gpa_t addr,
	return false;
}

static void stage2_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
static void gstage_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
{
	unsigned long size = PAGE_SIZE;
	struct kvm_vmid *vmid = &kvm->arch.vmid;

	if (stage2_level_to_page_size(level, &size))
	if (gstage_level_to_page_size(level, &size))
		return;
	addr &= ~(size - 1);

@@ -131,19 +131,19 @@ static void stage2_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
	preempt_enable();
}

static int stage2_set_pte(struct kvm *kvm, u32 level,
static int gstage_set_pte(struct kvm *kvm, u32 level,
			   struct kvm_mmu_memory_cache *pcache,
			   gpa_t addr, const pte_t *new_pte)
{
	u32 current_level = stage2_pgd_levels - 1;
	u32 current_level = gstage_pgd_levels - 1;
	pte_t *next_ptep = (pte_t *)kvm->arch.pgd;
	pte_t *ptep = &next_ptep[stage2_pte_index(addr, current_level)];
	pte_t *ptep = &next_ptep[gstage_pte_index(addr, current_level)];

	if (current_level < level)
		return -EINVAL;

	while (current_level != level) {
		if (stage2_pte_leaf(ptep))
		if (gstage_pte_leaf(ptep))
			return -EEXIST;

		if (!pte_val(*ptep)) {
@@ -155,23 +155,23 @@ static int stage2_set_pte(struct kvm *kvm, u32 level,
			*ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
					__pgprot(_PAGE_TABLE));
		} else {
			if (stage2_pte_leaf(ptep))
			if (gstage_pte_leaf(ptep))
				return -EEXIST;
			next_ptep = (pte_t *)stage2_pte_page_vaddr(*ptep);
			next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
		}

		current_level--;
		ptep = &next_ptep[stage2_pte_index(addr, current_level)];
		ptep = &next_ptep[gstage_pte_index(addr, current_level)];
	}

	*ptep = *new_pte;
	if (stage2_pte_leaf(ptep))
		stage2_remote_tlb_flush(kvm, current_level, addr);
	if (gstage_pte_leaf(ptep))
		gstage_remote_tlb_flush(kvm, current_level, addr);

	return 0;
}

static int stage2_map_page(struct kvm *kvm,
static int gstage_map_page(struct kvm *kvm,
			   struct kvm_mmu_memory_cache *pcache,
			   gpa_t gpa, phys_addr_t hpa,
			   unsigned long page_size,
@@ -182,7 +182,7 @@ static int stage2_map_page(struct kvm *kvm,
	pte_t new_pte;
	pgprot_t prot;

	ret = stage2_page_size_to_level(page_size, &level);
	ret = gstage_page_size_to_level(page_size, &level);
	if (ret)
		return ret;

@@ -193,9 +193,9 @@ static int stage2_map_page(struct kvm *kvm,
	 *    PTE so that software can update these bits.
	 *
	 * We support both options mentioned above. To achieve this, we
	 * always set 'A' and 'D' PTE bits at time of creating stage2
	 * always set 'A' and 'D' PTE bits at time of creating G-stage
	 * mapping. To support KVM dirty page logging with both options
	 * mentioned above, we will write-protect stage2 PTEs to track
	 * mentioned above, we will write-protect G-stage PTEs to track
	 * dirty pages.
	 */

@@ -213,24 +213,24 @@ static int stage2_map_page(struct kvm *kvm,
	new_pte = pfn_pte(PFN_DOWN(hpa), prot);
	new_pte = pte_mkdirty(new_pte);

	return stage2_set_pte(kvm, level, pcache, gpa, &new_pte);
	return gstage_set_pte(kvm, level, pcache, gpa, &new_pte);
}

enum stage2_op {
	STAGE2_OP_NOP = 0,	/* Nothing */
	STAGE2_OP_CLEAR,	/* Clear/Unmap */
	STAGE2_OP_WP,		/* Write-protect */
enum gstage_op {
	GSTAGE_OP_NOP = 0,	/* Nothing */
	GSTAGE_OP_CLEAR,	/* Clear/Unmap */
	GSTAGE_OP_WP,		/* Write-protect */
};

static void stage2_op_pte(struct kvm *kvm, gpa_t addr,
			  pte_t *ptep, u32 ptep_level, enum stage2_op op)
static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
			  pte_t *ptep, u32 ptep_level, enum gstage_op op)
{
	int i, ret;
	pte_t *next_ptep;
	u32 next_ptep_level;
	unsigned long next_page_size, page_size;

	ret = stage2_level_to_page_size(ptep_level, &page_size);
	ret = gstage_level_to_page_size(ptep_level, &page_size);
	if (ret)
		return;

@@ -239,31 +239,31 @@ static void stage2_op_pte(struct kvm *kvm, gpa_t addr,
	if (!pte_val(*ptep))
		return;

	if (ptep_level && !stage2_pte_leaf(ptep)) {
		next_ptep = (pte_t *)stage2_pte_page_vaddr(*ptep);
	if (ptep_level && !gstage_pte_leaf(ptep)) {
		next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
		next_ptep_level = ptep_level - 1;
		ret = stage2_level_to_page_size(next_ptep_level,
		ret = gstage_level_to_page_size(next_ptep_level,
						&next_page_size);
		if (ret)
			return;

		if (op == STAGE2_OP_CLEAR)
		if (op == GSTAGE_OP_CLEAR)
			set_pte(ptep, __pte(0));
		for (i = 0; i < PTRS_PER_PTE; i++)
			stage2_op_pte(kvm, addr + i * next_page_size,
			gstage_op_pte(kvm, addr + i * next_page_size,
					&next_ptep[i], next_ptep_level, op);
		if (op == STAGE2_OP_CLEAR)
		if (op == GSTAGE_OP_CLEAR)
			put_page(virt_to_page(next_ptep));
	} else {
		if (op == STAGE2_OP_CLEAR)
		if (op == GSTAGE_OP_CLEAR)
			set_pte(ptep, __pte(0));
		else if (op == STAGE2_OP_WP)
		else if (op == GSTAGE_OP_WP)
			set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE));
		stage2_remote_tlb_flush(kvm, ptep_level, addr);
		gstage_remote_tlb_flush(kvm, ptep_level, addr);
	}
}

static void stage2_unmap_range(struct kvm *kvm, gpa_t start,
static void gstage_unmap_range(struct kvm *kvm, gpa_t start,
			       gpa_t size, bool may_block)
{
	int ret;
@@ -274,9 +274,9 @@ static void stage2_unmap_range(struct kvm *kvm, gpa_t start,
	gpa_t addr = start, end = start + size;

	while (addr < end) {
		found_leaf = stage2_get_leaf_entry(kvm, addr,
		found_leaf = gstage_get_leaf_entry(kvm, addr,
						   &ptep, &ptep_level);
		ret = stage2_level_to_page_size(ptep_level, &page_size);
		ret = gstage_level_to_page_size(ptep_level, &page_size);
		if (ret)
			break;

@@ -284,8 +284,8 @@ static void stage2_unmap_range(struct kvm *kvm, gpa_t start,
			goto next;

		if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
			stage2_op_pte(kvm, addr, ptep,
				      ptep_level, STAGE2_OP_CLEAR);
			gstage_op_pte(kvm, addr, ptep,
				      ptep_level, GSTAGE_OP_CLEAR);

next:
		addr += page_size;
@@ -299,7 +299,7 @@ static void stage2_unmap_range(struct kvm *kvm, gpa_t start,
	}
}

static void stage2_wp_range(struct kvm *kvm, gpa_t start, gpa_t end)
static void gstage_wp_range(struct kvm *kvm, gpa_t start, gpa_t end)
{
	int ret;
	pte_t *ptep;
@@ -309,9 +309,9 @@ static void stage2_wp_range(struct kvm *kvm, gpa_t start, gpa_t end)
	unsigned long page_size;

	while (addr < end) {
		found_leaf = stage2_get_leaf_entry(kvm, addr,
		found_leaf = gstage_get_leaf_entry(kvm, addr,
						   &ptep, &ptep_level);
		ret = stage2_level_to_page_size(ptep_level, &page_size);
		ret = gstage_level_to_page_size(ptep_level, &page_size);
		if (ret)
			break;

@@ -319,15 +319,15 @@ static void stage2_wp_range(struct kvm *kvm, gpa_t start, gpa_t end)
			goto next;

		if (!(addr & (page_size - 1)) && ((end - addr) >= page_size))
			stage2_op_pte(kvm, addr, ptep,
				      ptep_level, STAGE2_OP_WP);
			gstage_op_pte(kvm, addr, ptep,
				      ptep_level, GSTAGE_OP_WP);

next:
		addr += page_size;
	}
}

static void stage2_wp_memory_region(struct kvm *kvm, int slot)
static void gstage_wp_memory_region(struct kvm *kvm, int slot)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
@@ -335,12 +335,12 @@ static void stage2_wp_memory_region(struct kvm *kvm, int slot)
	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	stage2_wp_range(kvm, start, end);
	gstage_wp_range(kvm, start, end);
	spin_unlock(&kvm->mmu_lock);
	kvm_flush_remote_tlbs(kvm);
}

static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
static int gstage_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
			  unsigned long size, bool writable)
{
	pte_t pte;
@@ -361,12 +361,12 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
		if (!writable)
			pte = pte_wrprotect(pte);

		ret = kvm_mmu_topup_memory_cache(&pcache, stage2_pgd_levels);
		ret = kvm_mmu_topup_memory_cache(&pcache, gstage_pgd_levels);
		if (ret)
			goto out;

		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, 0, &pcache, addr, &pte);
		ret = gstage_set_pte(kvm, 0, &pcache, addr, &pte);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;
@@ -388,7 +388,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
	phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;

	stage2_wp_range(kvm, start, end);
	gstage_wp_range(kvm, start, end);
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
@@ -411,7 +411,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_riscv_stage2_free_pgd(kvm);
	kvm_riscv_gstage_free_pgd(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
@@ -421,7 +421,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
	phys_addr_t size = slot->npages << PAGE_SHIFT;

	spin_lock(&kvm->mmu_lock);
	stage2_unmap_range(kvm, gpa, size, false);
	gstage_unmap_range(kvm, gpa, size, false);
	spin_unlock(&kvm->mmu_lock);
}

@@ -436,7 +436,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
	 * the memory slot is write protected.
	 */
	if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES)
		stage2_wp_memory_region(kvm, new->id);
		gstage_wp_memory_region(kvm, new->id);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -458,7 +458,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
	 * space addressable by the KVM guest GPA space.
	 */
	if ((new->base_gfn + new->npages) >=
	    (stage2_gpa_size >> PAGE_SHIFT))
	    (gstage_gpa_size >> PAGE_SHIFT))
		return -EFAULT;

	hva = new->userspace_addr;
@@ -514,7 +514,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
				goto out;
			}

			ret = stage2_ioremap(kvm, gpa, pa,
			ret = gstage_ioremap(kvm, gpa, pa,
					     vm_end - vm_start, writable);
			if (ret)
				break;
@@ -527,7 +527,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,

	spin_lock(&kvm->mmu_lock);
	if (ret)
		stage2_unmap_range(kvm, base_gpa, size, false);
		gstage_unmap_range(kvm, base_gpa, size, false);
	spin_unlock(&kvm->mmu_lock);

out:
@@ -540,7 +540,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
	if (!kvm->arch.pgd)
		return false;

	stage2_unmap_range(kvm, range->start << PAGE_SHIFT,
	gstage_unmap_range(kvm, range->start << PAGE_SHIFT,
			   (range->end - range->start) << PAGE_SHIFT,
			   range->may_block);
	return false;
@@ -556,10 +556,10 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)

	WARN_ON(range->end - range->start != 1);

	ret = stage2_map_page(kvm, NULL, range->start << PAGE_SHIFT,
	ret = gstage_map_page(kvm, NULL, range->start << PAGE_SHIFT,
			      __pfn_to_phys(pfn), PAGE_SIZE, true, true);
	if (ret) {
		kvm_debug("Failed to map stage2 page (error %d)\n", ret);
		kvm_debug("Failed to map G-stage page (error %d)\n", ret);
		return true;
	}

@@ -577,7 +577,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);

	if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
	if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
				   &ptep, &ptep_level))
		return false;

@@ -595,14 +595,14 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);

	if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
	if (!gstage_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
				   &ptep, &ptep_level))
		return false;

	return pte_young(*ptep);
}

int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write)
{
@@ -648,9 +648,9 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
	}

	/* We need minimum second+third level pages */
	ret = kvm_mmu_topup_memory_cache(pcache, stage2_pgd_levels);
	ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
	if (ret) {
		kvm_err("Failed to topup stage2 cache\n");
		kvm_err("Failed to topup G-stage cache\n");
		return ret;
	}

@@ -680,15 +680,15 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
	if (writeable) {
		kvm_set_pfn_dirty(hfn);
		mark_page_dirty(kvm, gfn);
		ret = stage2_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
		ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
				      vma_pagesize, false, true);
	} else {
		ret = stage2_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
		ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
				      vma_pagesize, true, true);
	}

	if (ret)
		kvm_err("Failed to map in stage2\n");
		kvm_err("Failed to map in G-stage\n");

out_unlock:
	spin_unlock(&kvm->mmu_lock);
@@ -697,7 +697,7 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
	return ret;
}

int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm)
int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm)
{
	struct page *pgd_page;

@@ -707,7 +707,7 @@ int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm)
	}

	pgd_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
				get_order(stage2_pgd_size));
				get_order(gstage_pgd_size));
	if (!pgd_page)
		return -ENOMEM;
	kvm->arch.pgd = page_to_virt(pgd_page);
@@ -716,13 +716,13 @@ int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm)
	return 0;
}

void kvm_riscv_stage2_free_pgd(struct kvm *kvm)
void kvm_riscv_gstage_free_pgd(struct kvm *kvm)
{
	void *pgd = NULL;

	spin_lock(&kvm->mmu_lock);
	if (kvm->arch.pgd) {
		stage2_unmap_range(kvm, 0UL, stage2_gpa_size, false);
		gstage_unmap_range(kvm, 0UL, gstage_gpa_size, false);
		pgd = READ_ONCE(kvm->arch.pgd);
		kvm->arch.pgd = NULL;
		kvm->arch.pgd_phys = 0;
@@ -730,12 +730,12 @@ void kvm_riscv_stage2_free_pgd(struct kvm *kvm)
	spin_unlock(&kvm->mmu_lock);

	if (pgd)
		free_pages((unsigned long)pgd, get_order(stage2_pgd_size));
		free_pages((unsigned long)pgd, get_order(gstage_pgd_size));
}

void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu)
void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu)
{
	unsigned long hgatp = stage2_mode;
	unsigned long hgatp = gstage_mode;
	struct kvm_arch *k = &vcpu->kvm->arch;

	hgatp |= (READ_ONCE(k->vmid.vmid) << HGATP_VMID_SHIFT) &
@@ -744,18 +744,18 @@ void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu)

	csr_write(CSR_HGATP, hgatp);

	if (!kvm_riscv_stage2_vmid_bits())
	if (!kvm_riscv_gstage_vmid_bits())
		__kvm_riscv_hfence_gvma_all();
}

void kvm_riscv_stage2_mode_detect(void)
void kvm_riscv_gstage_mode_detect(void)
{
#ifdef CONFIG_64BIT
	/* Try Sv48x4 stage2 mode */
	/* Try Sv48x4 G-stage mode */
	csr_write(CSR_HGATP, HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
	if ((csr_read(CSR_HGATP) >> HGATP_MODE_SHIFT) == HGATP_MODE_SV48X4) {
		stage2_mode = (HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
		stage2_pgd_levels = 4;
		gstage_mode = (HGATP_MODE_SV48X4 << HGATP_MODE_SHIFT);
		gstage_pgd_levels = 4;
	}
	csr_write(CSR_HGATP, 0);

@@ -763,12 +763,12 @@ void kvm_riscv_stage2_mode_detect(void)
#endif
}

unsigned long kvm_riscv_stage2_mode(void)
unsigned long kvm_riscv_gstage_mode(void)
{
	return stage2_mode >> HGATP_MODE_SHIFT;
	return gstage_mode >> HGATP_MODE_SHIFT;
}

int kvm_riscv_stage2_gpa_bits(void)
int kvm_riscv_gstage_gpa_bits(void)
{
	return stage2_gpa_bits;
	return gstage_gpa_bits;
}
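
As a quick worked check of the size macros above: with Sv39x4
(gstage_pgd_levels = 3, gstage_index_bits = 9, gstage_pgd_xbits = 2),
gstage_gpa_bits comes to 12 + 3*9 + 2 = 41, i.e. a 2 TiB guest physical
address space, and gstage_pgd_size to 1UL << (12 + 2) = 16 KiB for the
quadruple-size root table; when Sv48x4 is detected the level count becomes 4,
giving 50 GPA bits.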
arch/riscv/kvm/vcpu.c (+5 −5)
@@ -137,7 +137,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
	/* Cleanup VCPU timer */
	kvm_riscv_vcpu_timer_deinit(vcpu);

	/* Free unused pages pre-allocated for Stage2 page table mappings */
	/* Free unused pages pre-allocated for G-stage page table mappings */
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

@@ -635,7 +635,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	csr_write(CSR_HVIP, csr->hvip);
	csr_write(CSR_VSATP, csr->vsatp);

	kvm_riscv_stage2_update_hgatp(vcpu);
	kvm_riscv_gstage_update_hgatp(vcpu);

	kvm_riscv_vcpu_timer_restore(vcpu);

@@ -690,7 +690,7 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
			kvm_riscv_reset_vcpu(vcpu);

		if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
			kvm_riscv_stage2_update_hgatp(vcpu);
			kvm_riscv_gstage_update_hgatp(vcpu);

		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
			__kvm_riscv_hfence_gvma_all();
@@ -762,7 +762,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
		/* Check conditions before entering the guest */
		cond_resched();

		kvm_riscv_stage2_vmid_update(vcpu);
		kvm_riscv_gstage_vmid_update(vcpu);

		kvm_riscv_check_vcpu_requests(vcpu);

@@ -800,7 +800,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
		kvm_riscv_update_hvip(vcpu);

		if (ret <= 0 ||
		    kvm_riscv_stage2_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
		    kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
		    kvm_request_pending(vcpu)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			local_irq_enable();
arch/riscv/kvm/vcpu_exit.c (+3 −3)
@@ -412,7 +412,7 @@ static int emulate_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
	return 0;
}

static int stage2_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     struct kvm_cpu_trap *trap)
{
	struct kvm_memory_slot *memslot;
@@ -440,7 +440,7 @@ static int stage2_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
		};
	}

	ret = kvm_riscv_stage2_map(vcpu, memslot, fault_addr, hva,
	ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
		(trap->scause == EXC_STORE_GUEST_PAGE_FAULT) ? true : false);
	if (ret < 0)
		return ret;
@@ -686,7 +686,7 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
	case EXC_LOAD_GUEST_PAGE_FAULT:
	case EXC_STORE_GUEST_PAGE_FAULT:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = stage2_page_fault(vcpu, run, trap);
			ret = gstage_page_fault(vcpu, run, trap);
		break;
	case EXC_SUPERVISOR_SYSCALL:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)