Commit 58ceabc1 authored by Chen Wang, committed by Gu Zitao
Browse files

sw64: kvm: use generic kvm mmu memory caches

Sunway inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IBDJNZ



--------------------------------

Switch to the generic MMU memory cache implementation.

Besides, setting up the pud, pmd and pte page tables now calls
kvm_mmu_memory_cache_alloc() to take pages from the memory cache, so
increase KVM_MMU_CACHE_MIN_PAGES to 3 to avoid a kernel panic.

Signed-off-by: Chen Wang <chenwang@wxiat.com>
Reviewed-by: He Sheng <hesheng@wxiat.com>
Signed-off-by: Gu Zitao <guzitao@wxiat.com>
parent a02b493f
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -2,7 +2,6 @@

generic-y += clkdev.h
generic-y += export.h
generic-y += kvm_types.h
generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += param.h
+0 −11
Original line number Diff line number Diff line
@@ -76,17 +76,6 @@ struct kvm_arch {
	pgd_t *pgd;
};

/* Capacity of the per-vCPU MMU page-table cache (pages, see below). */
#define KVM_NR_MEM_OBJS		40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;			/* number of valid entries in objects[] */
	void *objects[KVM_NR_MEM_OBJS];	/* preallocated zeroed pages */
};

struct kvm_vcpu_arch {
	struct kvm_regs regs __attribute__((__aligned__(32)));
	struct vcpucb vcb;
+1 −1
Original line number Diff line number Diff line
@@ -29,7 +29,7 @@
#define AF_STATUS_FOE			0x8
#define AF_STATUS_INV			0x10

#define KVM_MMU_CACHE_MIN_PAGES		2
#define KVM_MMU_CACHE_MIN_PAGES		3

static inline void kvm_set_aptpte_readonly(pte_t *pte)
{
+8 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SW64_KVM_TYPES_H
#define _ASM_SW64_KVM_TYPES_H

#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40

#endif /* _ASM_SW64_KVM_TYPES_H */
+8 −45
Original line number Diff line number Diff line
@@ -101,43 +101,6 @@ static void apt_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
	put_page(virt_to_page(pudp));
}

/*
 * Top up @cache so it holds at least @min preallocated zeroed pages,
 * filling up to @max entries.  Returns 0 on success, -ENOMEM if a page
 * allocation fails (already-cached pages are kept).
 */
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
		int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	/* Enough pages already cached to satisfy the caller. */
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

/* Release every page still held in @mc back to the page allocator. */
static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

/* Free the vCPU's preallocated MMU page cache (vCPU teardown path). */
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

/*
 * Pop one preallocated page from @mc.  The cache must have been topped
 * up beforehand (BUG if empty) so this cannot fail in the fault path.
 */
static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

static void unmap_apt_ptes(struct kvm *kvm, pmd_t *pmd,
		phys_addr_t addr, phys_addr_t end)
{
@@ -360,7 +323,7 @@ static pud_t *apt_get_pud(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
	if (p4d_none(*p4d)) {
		if (!cache)
			return NULL;
		pud = mmu_memory_cache_alloc(cache);
		pud = kvm_mmu_memory_cache_alloc(cache);
		p4d_populate(NULL, p4d, pud);
		get_page(virt_to_page(p4d));
	}
@@ -380,7 +343,7 @@ static pmd_t *apt_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
	if (pud_none(*pud)) {
		if (!cache)
			return NULL;
		pmd = mmu_memory_cache_alloc(cache);
		pmd = kvm_mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}
@@ -840,7 +803,7 @@ static int apt_set_pte_fast(struct kvm_vcpu *vcpu,
	if (pud_none(*pud)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pmd = mmu_memory_cache_alloc(cache);
		pmd = kvm_mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}
@@ -866,7 +829,7 @@ static int apt_set_pte_fast(struct kvm_vcpu *vcpu,
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		pte = kvm_mmu_memory_cache_alloc(cache);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}
@@ -928,7 +891,7 @@ static int apt_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
	if (pud_none(*pud)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pmd = mmu_memory_cache_alloc(cache);
		pmd = kvm_mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		get_page(virt_to_page(pud));
	}
@@ -953,7 +916,7 @@ static int apt_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		pte = kvm_mmu_memory_cache_alloc(cache);
		pmd_populate_kernel(NULL, pmd, pte);
		get_page(virt_to_page(pmd));
	}
@@ -1199,8 +1162,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu,
		gfn = (fault_gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
	up_read(&current->mm->mmap_lock);
	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
				     KVM_NR_MEM_OBJS);
	ret = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);

	if (ret)
		return ret;

Loading