Commit 9955371c authored by Anup Patel, committed by Anup Patel

RISC-V: KVM: Implement MMU notifiers



This patch implements MMU notifiers for KVM RISC-V so that the guest
physical address space stays in sync with the host physical address space.

This will allow swapping, page migration, etc. to work transparently
with KVM RISC-V.

Signed-off-by: Anup Patel <anup.patel@wdc.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Alexander Graf <graf@amazon.com>
Acked-by: Palmer Dabbelt <palmerdabbelt@google.com>
parent 9d05c1fe
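
For context: defining KVM_ARCH_WANT_MMU_NOTIFIER (first hunk below) makes the generic KVM code register an MMU notifier for each VM, and the gfn_range hooks added in the MMU code are what that notifier ultimately calls when the host changes or reclaims pages backing guest memory. The sketch below only illustrates that dispatch; it is not the actual code in virt/kvm/kvm_main.c. sketch_invalidate_hva_range() is an invented name, it handles a single address space, and it skips the range-tracking bookkeeping the real implementation does.

#include <linux/kvm_host.h>

/*
 * Illustration only: roughly how a host MMU invalidation of an HVA range
 * is translated into guest-frame ranges and handed to the arch hook added
 * by this patch.  The real dispatch lives in virt/kvm/kvm_main.c; this
 * helper is invented purely for the example.
 */
static void sketch_invalidate_hva_range(struct kvm *kvm,
					unsigned long start, unsigned long end)
{
	struct kvm_memory_slot *slot;
	bool flush = false;

	spin_lock(&kvm->mmu_lock);
	kvm_for_each_memslot(slot, kvm_memslots(kvm)) {
		unsigned long hva_start = max(start, slot->userspace_addr);
		unsigned long hva_end = min(end, slot->userspace_addr +
						 (slot->npages << PAGE_SHIFT));
		struct kvm_gfn_range range = {};

		if (hva_start >= hva_end)
			continue;

		/* Convert the overlapping HVA range to a guest-frame range. */
		range.slot = slot;
		range.start = hva_to_gfn_memslot(hva_start, slot);
		range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
		range.may_block = true;

		/* Arch hook from this patch: zap the stage2 mappings. */
		flush |= kvm_unmap_gfn_range(kvm, &range);
	}
	if (flush)
		kvm_flush_remote_tlbs(kvm);
	spin_unlock(&kvm->mmu_lock);
}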
arch/riscv/include/asm/kvm_host.h +2 −0
@@ -196,6 +196,8 @@ static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

#define KVM_ARCH_WANT_MMU_NOTIFIER

void __kvm_riscv_hfence_gvma_vmid_gpa(unsigned long gpa, unsigned long vmid);
void __kvm_riscv_hfence_gvma_vmid(unsigned long vmid);
void __kvm_riscv_hfence_gvma_gpa(unsigned long gpa);
arch/riscv/kvm/Kconfig +1 −0
@@ -20,6 +20,7 @@ if VIRTUALIZATION
config KVM
	tristate "Kernel-based Virtual Machine (KVM) support (EXPERIMENTAL)"
	depends on RISCV_SBI && MMU
	select MMU_NOTIFIER
	select PREEMPT_NOTIFIERS
	select ANON_INODES
	select KVM_MMIO
arch/riscv/kvm/mmu.c +85 −5
@@ -300,7 +300,8 @@ static void stage2_op_pte(struct kvm *kvm, gpa_t addr,
	}
}

-static void stage2_unmap_range(struct kvm *kvm, gpa_t start, gpa_t size)
+static void stage2_unmap_range(struct kvm *kvm, gpa_t start,
+			       gpa_t size, bool may_block)
{
	int ret;
	pte_t *ptep;
@@ -325,6 +326,13 @@ static void stage2_unmap_range(struct kvm *kvm, gpa_t start, gpa_t size)

next:
		addr += page_size;

		/*
		 * If the range is too large, release the kvm->mmu_lock
		 * to prevent starvation and lockup detector warnings.
		 */
		if (may_block && addr < end)
			cond_resched_lock(&kvm->mmu_lock);
	}
}

@@ -405,7 +413,6 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
out:
	stage2_cache_flush(&pcache);
	return ret;

}

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
@@ -547,7 +554,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
	spin_lock(&kvm->mmu_lock);
	if (ret)
		stage2_unmap_range(kvm, mem->guest_phys_addr,
-				   mem->memory_size);
+				   mem->memory_size, false);
	spin_unlock(&kvm->mmu_lock);

out:
@@ -555,6 +562,73 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
	return ret;
}

bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	if (!kvm->arch.pgd)
		return 0;

	stage2_unmap_range(kvm, range->start << PAGE_SHIFT,
			   (range->end - range->start) << PAGE_SHIFT,
			   range->may_block);
	return 0;
}

bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	int ret;
	kvm_pfn_t pfn = pte_pfn(range->pte);

	if (!kvm->arch.pgd)
		return 0;

	WARN_ON(range->end - range->start != 1);

	ret = stage2_map_page(kvm, NULL, range->start << PAGE_SHIFT,
			      __pfn_to_phys(pfn), PAGE_SIZE, true, true);
	if (ret) {
		kvm_debug("Failed to map stage2 page (error %d)\n", ret);
		return 1;
	}

	return 0;
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	pte_t *ptep;
	u32 ptep_level = 0;
	u64 size = (range->end - range->start) << PAGE_SHIFT;

	if (!kvm->arch.pgd)
		return 0;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);

	if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
				   &ptep, &ptep_level))
		return 0;

	return ptep_test_and_clear_young(NULL, 0, ptep);
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	pte_t *ptep;
	u32 ptep_level = 0;
	u64 size = (range->end - range->start) << PAGE_SHIFT;

	if (!kvm->arch.pgd)
		return 0;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);

	if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
				   &ptep, &ptep_level))
		return 0;

	return pte_young(*ptep);
}

int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
			 struct kvm_memory_slot *memslot,
			 gpa_t gpa, unsigned long hva, bool is_write)
@@ -569,7 +643,7 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
	struct kvm_mmu_page_cache *pcache = &vcpu->arch.mmu_page_cache;
	bool logging = (memslot->dirty_bitmap &&
			!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
-	unsigned long vma_pagesize;
+	unsigned long vma_pagesize, mmu_seq;

	mmap_read_lock(current->mm);

@@ -608,6 +682,8 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
		return ret;
	}

	mmu_seq = kvm->mmu_notifier_seq;

	hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writeable);
	if (hfn == KVM_PFN_ERR_HWPOISON) {
		send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
@@ -626,6 +702,9 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,

	spin_lock(&kvm->mmu_lock);

	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	if (writeable) {
		kvm_set_pfn_dirty(hfn);
		mark_page_dirty(kvm, gfn);
@@ -639,6 +718,7 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
	if (ret)
		kvm_err("Failed to map in stage2\n");

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_set_pfn_accessed(hfn);
	kvm_release_pfn_clean(hfn);
@@ -675,7 +755,7 @@ void kvm_riscv_stage2_free_pgd(struct kvm *kvm)

	spin_lock(&kvm->mmu_lock);
	if (kvm->arch.pgd) {
-		stage2_unmap_range(kvm, 0UL, stage2_gpa_size);
+		stage2_unmap_range(kvm, 0UL, stage2_gpa_size, false);
		pgd = READ_ONCE(kvm->arch.pgd);
		kvm->arch.pgd = NULL;
		kvm->arch.pgd_phys = 0;
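
The mmu_seq snapshot and mmu_notifier_retry() check added to kvm_riscv_stage2_map() above close the race between resolving the host page (gfn_to_pfn_prot() may sleep and fault memory in, during which a notifier invalidation can run) and installing the stage2 mapping under kvm->mmu_lock. A condensed restatement of just that pattern follows; sketch_map_guest_page() is an invented name and the actual mapping step is elided.

/*
 * Condensed restatement of the retry pattern used in
 * kvm_riscv_stage2_map() above; the helper name is invented and the
 * actual stage2_map_page() call is elided.
 */
static int sketch_map_guest_page(struct kvm *kvm, gfn_t gfn, bool is_write)
{
	unsigned long mmu_seq;
	kvm_pfn_t hfn;
	bool writeable;
	int ret = -EAGAIN;

	/* Snapshot the notifier sequence *before* resolving the page. */
	mmu_seq = kvm->mmu_notifier_seq;

	/* May sleep and fault the page in; notifiers can run meanwhile. */
	hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writeable);
	if (is_error_noslot_pfn(hfn))
		return -EFAULT;

	spin_lock(&kvm->mmu_lock);

	/*
	 * If an invalidation ran since the snapshot, the pfn may already
	 * be stale: bail out and let the guest fault again.
	 */
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	/* Safe to install the mapping (stage2_map_page() in the real code). */
	ret = 0;

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(hfn);
	return ret;
}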
arch/riscv/kvm/vm.c +1 −0
@@ -65,6 +65,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_READONLY_MEM:
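
With the notifiers wired up, the VM can advertise KVM_CAP_SYNC_MMU (hunk above), which indicates that the guest's view of memory tracks changes made to the backing host virtual memory. A minimal userspace probe is shown below; the ioctl and capability are the standard KVM API, only the example program itself is illustrative.

/* Minimal illustrative probe for KVM_CAP_SYNC_MMU. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* KVM_CHECK_EXTENSION returns > 0 when the capability is supported. */
	int ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_MMU);
	printf("KVM_CAP_SYNC_MMU: %s\n", ret > 0 ? "supported" : "not supported");

	close(kvm);
	return 0;
}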