Commit ef18d479 authored by Yu Zhang's avatar Yu Zhang
Browse files

KVM: RISC-V: Convert to the gfn-based MMU notifier callbacks

Intel inclusion
category: feature
bugzilla: https://gitee.com/openeuler/intel-kernel/issues/I7S3VQ


CVE: NA

----------------------------------------------------------------------

Align with the upstream RISC-V code, to use the gfn-based MMU notifier
APIs, which do the hva->gfn lookup in common code.

Note: RISC-V's MMU notifier support was introduced in upstream
commit 9955371c ("RISC-V: KVM: Implement MMU notifiers"), which
uses the gfn-based MMU notifier in the first place. And with commit
b4c5936c ("KVM: Kill off the old hva-based MMU notifier callbacks")
backported, we can feel safe to use the latest, upstream-aligned
implementation now.

Also, KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS was used, so that other
archs which cannot easily use the gfn-based MMU notifiers can continue
to use their hva-based code.

Signed-off-by: default avatarAnup Patel <anup.patel@wdc.com>
Signed-off-by: default avatarYu Zhang <yu.c.zhang@linux.intel.com>
parent bd42f5af
Loading
Loading
Loading
Loading
+1 −5
Original line number Diff line number Diff line
@@ -218,11 +218,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start,
			unsigned long end, unsigned int flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
#define KVM_ARCH_WANT_NEW_MMU_NOTIFIER_APIS

void __kvm_riscv_hfence_gvma_vmid_gpa(unsigned long gpa, unsigned long vmid);
void __kvm_riscv_hfence_gvma_vmid(unsigned long vmid);
+29 −84
Original line number Diff line number Diff line
@@ -412,38 +412,6 @@ int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,

}

/*
 * Iterate over all memslots and invoke @handler on each guest-physical
 * range whose host-virtual range intersects [start, end).
 *
 * NOTE(review): this is the hva-based iteration helper that this commit
 * removes; the equivalent hva->gfn lookup now happens in common KVM code.
 *
 * Returns the bitwise OR of all handler return values (0 if no memslot
 * intersected the range).
 */
static int handle_hva_to_gpa(struct kvm *kvm,
			     unsigned long start,
			     unsigned long end,
			     int (*handler)(struct kvm *kvm,
					    gpa_t gpa, u64 size,
					    void *data),
			     void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int ret = 0;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gpa;

		/* Clamp [start, end) to this memslot's hva range. */
		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/* Translate the clamped hva start into a guest physical address. */
		gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
		ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
	}

	return ret;
}

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
@@ -597,94 +565,71 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
	return ret;
}

/*
 * handle_hva_to_gpa() callback: tear down the stage-2 mappings covering
 * [gpa, gpa + size).  @data carries the MMU notifier flags;
 * MMU_NOTIFIER_RANGE_BLOCKABLE determines whether the unmap is allowed
 * to block.
 *
 * NOTE(review): removed by this commit in favour of kvm_unmap_gfn_range().
 */
static int kvm_unmap_hva_handler(struct kvm *kvm,
				 gpa_t gpa, u64 size, void *data)
{
	unsigned int flags = *(unsigned int *)data;
	bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;

	stage2_unmap_range(kvm, gpa, size, may_block);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start,
			unsigned long end, unsigned int flags)
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	if (!kvm->arch.pgd)
		return 0;

	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
	return 0;
}

static int kvm_set_spte_handler(struct kvm *kvm,
				gpa_t gpa, u64 size, void *data)
{
	pte_t *pte = (pte_t *)data;

	WARN_ON(size != PAGE_SIZE);
	stage2_set_pte(kvm, 0, NULL, gpa, pte);

	stage2_unmap_range(kvm, range->start << PAGE_SHIFT,
			   (range->end - range->start) << PAGE_SHIFT,
			   range->may_block);
	return 0;
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	unsigned long end = hva + PAGE_SIZE;
	kvm_pfn_t pfn = pte_pfn(pte);
	pte_t stage2_pte;
	int ret;
	kvm_pfn_t pfn = pte_pfn(range->pte);

	if (!kvm->arch.pgd)
		return 0;

	stage2_pte = pfn_pte(pfn, PAGE_WRITE_EXEC);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
	WARN_ON(range->end - range->start != 1);

	ret = stage2_map_page(kvm, NULL, range->start << PAGE_SHIFT,
			      __pfn_to_phys(pfn), PAGE_SIZE, true, true);
	if (ret) {
		kvm_debug("Failed to map stage2 page (error %d)\n", ret);
		return 1;
	}

	return 0;
}

static int kvm_age_hva_handler(struct kvm *kvm,
				gpa_t gpa, u64 size, void *data)
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	pte_t *ptep;
	u32 ptep_level = 0;
	u64 size = (range->end - range->start) << PAGE_SHIFT;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);

	if (!stage2_get_leaf_entry(kvm, gpa, &ptep, &ptep_level))
	if (!kvm->arch.pgd)
		return 0;

	return ptep_test_and_clear_young(NULL, 0, ptep);
}
	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
	if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
				   &ptep, &ptep_level))
		return 0;

	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
	return ptep_test_and_clear_young(NULL, 0, ptep);
}

static int kvm_test_age_hva_handler(struct kvm *kvm,
				    gpa_t gpa, u64 size, void *data)
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	pte_t *ptep;
	u32 ptep_level = 0;
	u64 size = (range->end - range->start) << PAGE_SHIFT;

	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
	if (!stage2_get_leaf_entry(kvm, gpa, &ptep, &ptep_level))
	if (!kvm->arch.pgd)
		return 0;

	return pte_young(*ptep);
}
	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PGDIR_SIZE);

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.pgd)
	if (!stage2_get_leaf_entry(kvm, range->start << PAGE_SHIFT,
				   &ptep, &ptep_level))
		return 0;

	return handle_hva_to_gpa(kvm, hva, hva,
				 kvm_test_age_hva_handler, NULL);
	return pte_young(*ptep);
}

int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,