Commit 1bac49d4 authored by Quentin Perret, committed by Marc Zyngier

KVM: arm64: Provide the host_stage2_try() helper macro

We currently unmap all MMIO mappings from the host stage-2 to recycle
the pages whenever we run out. In order to make this pattern easy to
re-use from other places, factor the logic out into a dedicated macro.
While at it, apply the macro to the kvm_pgtable_stage2_set_owner()
calls. They're currently only called early on and are guaranteed to
succeed, but making them robust to the -ENOMEM case doesn't hurt and
will avoid painful debugging sessions later on.

Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210809152448.1810400-4-qperret@google.com
parent 8e049e0d
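
For readers unfamiliar with the idiom the patch introduces, here is a minimal, self-contained userspace sketch of the same try/recycle/retry shape (all names are illustrative stand-ins, not the kernel's code; it needs GCC or Clang for the statement-expression extension):

#include <errno.h>
#include <stdio.h>

/* Pretend page pool: empty until reclaim() recycles a page into it. */
static int pool_pages;

/* Stand-in for host_stage2_unmap_dev_all(). */
static int reclaim(void)
{
	pool_pages = 1;
	return 0;
}

/* Stand-in for __host_stage2_idmap(); fails with -ENOMEM on an empty pool. */
static int map_one(int pfn)
{
	return pool_pages-- > 0 ? 0 : -ENOMEM;
}

/*
 * Statement-expression macro (a GCC/Clang extension): the ({ ... }) block
 * evaluates to __ret, so the macro can be used like a function call.
 */
#define try_or_reclaim(fn, ...)					\
	({							\
		int __ret = fn(__VA_ARGS__);			\
		if (__ret == -ENOMEM) {				\
			__ret = reclaim();			\
			if (!__ret)				\
				__ret = fn(__VA_ARGS__);	\
		}						\
		__ret;						\
	})

int main(void)
{
	/* First attempt hits -ENOMEM, reclaim() recycles a page, retry succeeds. */
	printf("%d\n", try_or_reclaim(map_one, 42));	/* prints 0 */
	return 0;
}

Built with gcc, this prints 0: the first attempt fails with -ENOMEM, the reclaim succeeds, and the retry maps the page.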
22 additions, 18 deletions
@@ -208,6 +208,25 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
 				      prot, &host_s2_pool);
 }
 
+/*
+ * The pool has been provided with enough pages to cover all of memory with
+ * page granularity, but it is difficult to know how much of the MMIO range
+ * we will need to cover upfront, so we may need to 'recycle' the pages if we
+ * run out.
+ */
+#define host_stage2_try(fn, ...)					\
+	({								\
+		int __ret;						\
+		hyp_assert_lock_held(&host_kvm.lock);			\
+		__ret = fn(__VA_ARGS__);				\
+		if (__ret == -ENOMEM) {					\
+			__ret = host_stage2_unmap_dev_all();		\
+			if (!__ret)					\
+				__ret = fn(__VA_ARGS__);		\
+		}							\
+		__ret;						\
+	 })
+
 static int host_stage2_idmap(u64 addr)
 {
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
@@ -223,22 +242,7 @@ static int host_stage2_idmap(u64 addr)
 	if (ret)
 		goto unlock;
 
-	ret = __host_stage2_idmap(range.start, range.end, prot);
-	if (ret != -ENOMEM)
-		goto unlock;
-
-	/*
-	 * The pool has been provided with enough pages to cover all of memory
-	 * with page granularity, but it is difficult to know how much of the
-	 * MMIO range we will need to cover upfront, so we may need to 'recycle'
-	 * the pages if we run out.
-	 */
-	ret = host_stage2_unmap_dev_all();
-	if (ret)
-		goto unlock;
-
-	ret = __host_stage2_idmap(range.start, range.end, prot);
-
+	ret = host_stage2_try(__host_stage2_idmap, range.start, range.end, prot);
 unlock:
 	hyp_spin_unlock(&host_kvm.lock);
 
@@ -257,8 +261,8 @@ int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
 		return -EINVAL;
 
 	hyp_spin_lock(&host_kvm.lock);
-	ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start,
-					   &host_s2_pool, pkvm_hyp_id);
+	ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
+			      start, end - start, &host_s2_pool, pkvm_hyp_id);
 	hyp_spin_unlock(&host_kvm.lock);
 
 	return ret != -EAGAIN ? ret : 0;
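
A note on the construct: host_stage2_try() is a statement expression (a GCC/Clang extension), so the ({ ... }) block evaluates to __ret and the macro can sit on the right-hand side of an assignment like a function call, while hyp_assert_lock_held() makes the locking contract explicit: every caller must already hold host_kvm.lock. Hand-expanding the __pkvm_mark_hyp() call above (for illustration only; this is not part of the patch) gives roughly:

	ret = ({
		int __ret;
		hyp_assert_lock_held(&host_kvm.lock);
		__ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start,
						     end - start, &host_s2_pool,
						     pkvm_hyp_id);
		if (__ret == -ENOMEM) {
			/* Recycle MMIO pages from the pool, then retry once. */
			__ret = host_stage2_unmap_dev_all();
			if (!__ret)
				__ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt,
								     start, end - start,
								     &host_s2_pool,
								     pkvm_hyp_id);
		}
		__ret;
	});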