Commit 46c88622 authored by Marc Zyngier

Merge branch kvm-arm64/mmu/reduce-vmemmap-overhead into kvmarm-master/next

Host stage-2 optimisations from Quentin Perret

* kvm-arm64/mmu/reduce-vmemmap-overhead:
  KVM: arm64: Use less bits for hyp_page refcount
  KVM: arm64: Use less bits for hyp_page order
  KVM: arm64: Remove hyp_pool pointer from struct hyp_page
  KVM: arm64: Unify MMIO and mem host stage-2 pools
  KVM: arm64: Remove list_head from hyp_page
  KVM: arm64: Use refcount at hyp to check page availability
  KVM: arm64: Move hyp_pool locking out of refcount helpers
parents 32ab5a5e 6929586d
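
The heart of the series is the shrinking of struct hyp_page, of which the hypervisor keeps one copy per page of memory in its vmemmap. A back-of-the-envelope sketch of the saving (the layouts are taken from the memory.h hunk below; the standalone declarations here are illustrative, not part of the patch, and assume 64-bit pointers):

/* Illustrative only -- struct layouts follow the memory.h diff below. */
struct hyp_pool;
struct list_head { struct list_head *next, *prev; };

struct hyp_page_old {			/* before the series */
	unsigned int refcount;		/*  4 bytes */
	unsigned int order;		/*  4 bytes */
	struct hyp_pool *pool;		/*  8 bytes on 64-bit */
	struct list_head node;		/* 16 bytes (two pointers) */
};					/* 32 bytes per tracked page */

struct hyp_page_new {			/* after the series */
	unsigned short refcount;	/* 2 bytes */
	unsigned short order;		/* 2 bytes */
};					/* 4 bytes per tracked page */

/*
 * With one entry per 4KiB page, the hyp vmemmap overhead drops from
 * 32B/4KiB (~0.8% of memory) to 4B/4KiB (~0.1%).
 */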
arch/arm64/kvm/hyp/include/nvhe/gfp.h  +5 −40
@@ -7,7 +7,7 @@
 #include <nvhe/memory.h>
 #include <nvhe/spinlock.h>
 
-#define HYP_NO_ORDER	UINT_MAX
+#define HYP_NO_ORDER	USHRT_MAX
 
 struct hyp_pool {
 	/*
@@ -19,48 +19,13 @@ struct hyp_pool {
 	struct list_head free_area[MAX_ORDER];
 	phys_addr_t range_start;
 	phys_addr_t range_end;
-	unsigned int max_order;
+	unsigned short max_order;
 };
 
-static inline void hyp_page_ref_inc(struct hyp_page *p)
-{
-	struct hyp_pool *pool = hyp_page_to_pool(p);
-
-	hyp_spin_lock(&pool->lock);
-	p->refcount++;
-	hyp_spin_unlock(&pool->lock);
-}
-
-static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
-{
-	struct hyp_pool *pool = hyp_page_to_pool(p);
-	int ret;
-
-	hyp_spin_lock(&pool->lock);
-	p->refcount--;
-	ret = (p->refcount == 0);
-	hyp_spin_unlock(&pool->lock);
-
-	return ret;
-}
-
-static inline void hyp_set_page_refcounted(struct hyp_page *p)
-{
-	struct hyp_pool *pool = hyp_page_to_pool(p);
-
-	hyp_spin_lock(&pool->lock);
-	if (p->refcount) {
-		hyp_spin_unlock(&pool->lock);
-		BUG();
-	}
-	p->refcount = 1;
-	hyp_spin_unlock(&pool->lock);
-}
-
 /* Allocation */
-void *hyp_alloc_pages(struct hyp_pool *pool, unsigned int order);
-void hyp_get_page(void *addr);
-void hyp_put_page(void *addr);
+void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
+void hyp_get_page(struct hyp_pool *pool, void *addr);
+void hyp_put_page(struct hyp_pool *pool, void *addr);
 
 /* Used pages cannot be freed */
 int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
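
The three refcount helpers removed above are not dropped: per the "Move hyp_pool locking out of refcount helpers" patch they move into the allocator, with serialization left to code that holds pool->lock, and per "Use less bits for hyp_page refcount" the increment gains an overflow check for the now 16-bit counter. A reconstructed sketch of the resulting shape in the allocator (not the verbatim patch; hyp_virt_to_page() and __hyp_attach_page() are the allocator's existing helpers):

/* Reconstructed sketch -- the pool lock is now taken by the callers. */
static void hyp_page_ref_inc(struct hyp_page *p)
{
	BUG_ON(p->refcount == USHRT_MAX);	/* refcount is 16-bit now */
	p->refcount++;
}

static int hyp_page_ref_dec_and_test(struct hyp_page *p)
{
	p->refcount--;
	return (p->refcount == 0);
}

void hyp_put_page(struct hyp_pool *pool, void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	hyp_spin_lock(&pool->lock);
	if (hyp_page_ref_dec_and_test(p))
		__hyp_attach_page(pool, p);	/* back to the free lists */
	hyp_spin_unlock(&pool->lock);
}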
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h  +1 −1
@@ -23,7 +23,7 @@ extern struct host_kvm host_kvm;
 int __pkvm_prot_finalize(void);
 int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end);
 
-int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool);
+int kvm_host_prepare_stage2(void *pgt_pool_base);
 void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 
 static __always_inline void __load_host_stage2(void)
arch/arm64/kvm/hyp/include/nvhe/memory.h  +2 −5
@@ -7,12 +7,9 @@
 
 #include <linux/types.h>
 
-struct hyp_pool;
 struct hyp_page {
-	unsigned int refcount;
-	unsigned int order;
-	struct hyp_pool *pool;
-	struct list_head node;
+	unsigned short refcount;
+	unsigned short order;
 };
 
 extern u64 __hyp_vmemmap;
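
Shrinking struct hyp_page pays off because the hyp vmemmap declared above is simply an array of these structures indexed by PFN. Schematically (following the shape of the kernel's hyp_vmemmap and hyp_phys_to_page definitions, simplified here):

#define hyp_vmemmap	((struct hyp_page *)__hyp_vmemmap)

/* One struct hyp_page of metadata per physical page frame. */
#define hyp_phys_to_page(phys)	(&hyp_vmemmap[(phys) >> PAGE_SHIFT])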
arch/arm64/kvm/hyp/include/nvhe/mm.h  +7 −6
@@ -78,19 +78,20 @@ static inline unsigned long hyp_s1_pgtable_pages(void)
 	return res;
 }
 
-static inline unsigned long host_s2_mem_pgtable_pages(void)
+static inline unsigned long host_s2_pgtable_pages(void)
 {
+	unsigned long res;
+
 	/*
 	 * Include an extra 16 pages to safely upper-bound the worst case of
 	 * concatenated pgds.
 	 */
-	return __hyp_pgtable_total_pages() + 16;
-}
+	res = __hyp_pgtable_total_pages() + 16;
 
-static inline unsigned long host_s2_dev_pgtable_pages(void)
-{
 	/* Allow 1 GiB for MMIO mappings */
-	return __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+	res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+
+	return res;
 }
 
 #endif /* __KVM_HYP_MM_H */
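
To put a number on the 1 GiB MMIO allowance folded in above: with 4KiB pages and 512 entries per table, mapping 1GiB at page granularity needs 2^18 PTEs. A hedged standalone model of the worst-case per-level sum that __hyp_pgtable_max_pages() computes (the helper below is hypothetical, written only to show the arithmetic):

#include <stdio.h>

/*
 * Hypothetical model of __hyp_pgtable_max_pages(): at each of the four
 * page-table levels, provision one table page per 512 entries below it.
 */
static unsigned long pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0;
	int level;

	for (level = 0; level < 4; level++) {
		nr_pages = (nr_pages + 511) / 512;	/* DIV_ROUND_UP */
		total += nr_pages;
	}
	return total;
}

int main(void)
{
	/* 1GiB >> 12 = 262144 PTEs -> 512 + 1 + 1 + 1 = 515 table pages */
	printf("%lu\n", pgtable_max_pages(1UL << 18));
	return 0;
}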
arch/arm64/kvm/hyp/nvhe/mem_protect.c  +30 −30
@@ -23,8 +23,7 @@
 extern unsigned long hyp_nr_cpus;
 struct host_kvm host_kvm;
 
-static struct hyp_pool host_s2_mem;
-static struct hyp_pool host_s2_dev;
+static struct hyp_pool host_s2_pool;
 
 /*
  * Copies of the host's CPU features registers holding sanitized values.
@@ -36,7 +35,7 @@ static const u8 pkvm_hyp_id = 1;
 
 static void *host_s2_zalloc_pages_exact(size_t size)
 {
-	return hyp_alloc_pages(&host_s2_mem, get_order(size));
+	return hyp_alloc_pages(&host_s2_pool, get_order(size));
 }
 
 static void *host_s2_zalloc_page(void *pool)
@@ -44,20 +43,24 @@ static void *host_s2_zalloc_page(void *pool)
 	return hyp_alloc_pages(pool, 0);
 }
 
-static int prepare_s2_pools(void *mem_pgt_pool, void *dev_pgt_pool)
+static void host_s2_get_page(void *addr)
+{
+	hyp_get_page(&host_s2_pool, addr);
+}
+
+static void host_s2_put_page(void *addr)
+{
+	hyp_put_page(&host_s2_pool, addr);
+}
+
+static int prepare_s2_pool(void *pgt_pool_base)
 {
 	unsigned long nr_pages, pfn;
 	int ret;
 
-	pfn = hyp_virt_to_pfn(mem_pgt_pool);
-	nr_pages = host_s2_mem_pgtable_pages();
-	ret = hyp_pool_init(&host_s2_mem, pfn, nr_pages, 0);
-	if (ret)
-		return ret;
-
-	pfn = hyp_virt_to_pfn(dev_pgt_pool);
-	nr_pages = host_s2_dev_pgtable_pages();
-	ret = hyp_pool_init(&host_s2_dev, pfn, nr_pages, 0);
+	pfn = hyp_virt_to_pfn(pgt_pool_base);
+	nr_pages = host_s2_pgtable_pages();
+	ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
 	if (ret)
 		return ret;
 
@@ -67,8 +70,8 @@ static int prepare_s2_pools(void *mem_pgt_pool, void *dev_pgt_pool)
 		.phys_to_virt = hyp_phys_to_virt,
 		.virt_to_phys = hyp_virt_to_phys,
 		.page_count = hyp_page_count,
-		.get_page = hyp_get_page,
-		.put_page = hyp_put_page,
+		.get_page = host_s2_get_page,
+		.put_page = host_s2_put_page,
 	};
 
 	return 0;
@@ -86,7 +89,7 @@ static void prepare_host_vtcr(void)
 					  id_aa64mmfr1_el1_sys_val, phys_shift);
 }
 
-int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool)
+int kvm_host_prepare_stage2(void *pgt_pool_base)
 {
 	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
 	int ret;
@@ -94,7 +97,7 @@ int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool)
 	prepare_host_vtcr();
 	hyp_spin_lock_init(&host_kvm.lock);
 
-	ret = prepare_s2_pools(mem_pgt_pool, dev_pgt_pool);
+	ret = prepare_s2_pool(pgt_pool_base);
 	if (ret)
 		return ret;
 
@@ -199,11 +202,10 @@ static bool range_is_memory(u64 start, u64 end)
 }
 
 static inline int __host_stage2_idmap(u64 start, u64 end,
-				      enum kvm_pgtable_prot prot,
-				      struct hyp_pool *pool)
+				      enum kvm_pgtable_prot prot)
 {
 	return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
-				      prot, pool);
+				      prot, &host_s2_pool);
 }
 
 static int host_stage2_idmap(u64 addr)
@@ -211,7 +213,6 @@ static int host_stage2_idmap(u64 addr)
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
 	struct kvm_mem_range range;
 	bool is_memory = find_mem_range(addr, &range);
-	struct hyp_pool *pool = is_memory ? &host_s2_mem : &host_s2_dev;
 	int ret;
 
 	if (is_memory)
@@ -222,22 +223,21 @@ static int host_stage2_idmap(u64 addr)
 	if (ret)
 		goto unlock;
 
-	ret = __host_stage2_idmap(range.start, range.end, prot, pool);
-	if (is_memory || ret != -ENOMEM)
+	ret = __host_stage2_idmap(range.start, range.end, prot);
+	if (ret != -ENOMEM)
 		goto unlock;
 
 	/*
-	 * host_s2_mem has been provided with enough pages to cover all of
-	 * memory with page granularity, so we should never hit the ENOMEM case.
-	 * However, it is difficult to know how much of the MMIO range we will
-	 * need to cover upfront, so we may need to 'recycle' the pages if we
-	 * run out.
+	 * The pool has been provided with enough pages to cover all of memory
+	 * with page granularity, but it is difficult to know how much of the
+	 * MMIO range we will need to cover upfront, so we may need to 'recycle'
+	 * the pages if we run out.
 	 */
 	ret = host_stage2_unmap_dev_all();
 	if (ret)
 		goto unlock;
 
-	ret = __host_stage2_idmap(range.start, range.end, prot, pool);
+	ret = __host_stage2_idmap(range.start, range.end, prot);
 
 unlock:
 	hyp_spin_unlock(&host_kvm.lock);
@@ -258,7 +258,7 @@ int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
 
 	hyp_spin_lock(&host_kvm.lock);
 	ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start,
-					   &host_s2_mem, pkvm_hyp_id);
+					   &host_s2_pool, pkvm_hyp_id);
 	hyp_spin_unlock(&host_kvm.lock);
 
 	return ret != -EAGAIN ? ret : 0;
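
The 'recycle' step that the rewritten comment above refers to is host_stage2_unmap_dev_all(), which predates this series: it unmaps everything outside the memblock ranges so that the page-table pages backing MMIO mappings fall back into the pool. A reconstructed sketch of its shape (simplified; not part of this diff):

static int host_stage2_unmap_dev_all(void)
{
	struct kvm_pgtable *pgt = &host_kvm.pgt;
	struct memblock_region *reg;
	u64 addr = 0;
	int i, ret;

	/* Unmap all non-memory regions to recycle the pages. */
	for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) {
		reg = &hyp_memory[i];
		ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
		if (ret)
			return ret;
	}
	return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
}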