Commit 1ee32109 authored by Will Deacon's avatar Will Deacon Committed by Marc Zyngier
Browse files

KVM: arm64: Implement __pkvm_host_share_hyp() using do_share()



__pkvm_host_share_hyp() shares memory between the host and the
hypervisor, so implement it as an invocation of the new do_share()
mechanism.

Note that double-sharing is no longer permitted (as this allows us to
reduce the number of page-table walks significantly), but is thankfully
no longer relied upon by the host.

Signed-off-by: default avatarWill Deacon <will@kernel.org>
Signed-off-by: default avatarQuentin Perret <qperret@google.com>
Signed-off-by: default avatarMarc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211215161232.1480836-12-qperret@google.com
parent e82edcc7
Loading
Loading
Loading
Loading
+33 −88
Original line number Diff line number Diff line
@@ -370,94 +370,6 @@ static int host_stage2_idmap(u64 addr)
	return ret;
}

static inline bool check_prot(enum kvm_pgtable_prot prot,
			      enum kvm_pgtable_prot required,
			      enum kvm_pgtable_prot denied)
{
	return (prot & (required | denied)) == required;
}

/*
 * Share the page of memory at @pfn from the host with the hypervisor.
 *
 * On success the page is mapped into the hyp stage-1 as SHARED_BORROWED
 * and the host stage-2 entry is marked SHARED_OWNED.  Re-sharing an
 * already-shared page is tolerated, provided both page-tables agree on
 * the sharing state.
 *
 * Returns 0 on success, -EINVAL if @pfn is not backed by memory, -EPERM
 * if the page is in a state that forbids sharing, or a negative error
 * from the page-table walks.
 */
int __pkvm_host_share_hyp(u64 pfn)
{
	phys_addr_t addr = hyp_pfn_to_phys(pfn);
	enum kvm_pgtable_prot prot, cur;
	void *virt = __hyp_va(addr);
	enum pkvm_page_state state;
	kvm_pte_t pte;
	int ret;

	if (!addr_is_memory(addr))
		return -EINVAL;

	/* Hold both locks so the two page-tables are updated atomically. */
	host_lock_component();
	hyp_lock_component();

	ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, NULL);
	if (ret)
		goto unlock;
	/*
	 * No host stage-2 mapping yet: NOTE(review) presumably an unmapped
	 * memory page is implicitly owned by the host — confirm against the
	 * idmap-on-demand fault path.
	 */
	if (!pte)
		goto map_shared;

	/*
	 * Check attributes in the host stage-2 PTE. We need the page to be:
	 *  - mapped RWX as we're sharing memory;
	 *  - not borrowed, as that implies absence of ownership.
	 * Otherwise, we can't let it go through.
	 */
	cur = kvm_pgtable_stage2_pte_prot(pte);
	prot = pkvm_mkstate(0, PKVM_PAGE_SHARED_BORROWED);
	if (!check_prot(cur, PKVM_HOST_MEM_PROT, prot)) {
		ret = -EPERM;
		goto unlock;
	}

	state = pkvm_getstate(cur);
	if (state == PKVM_PAGE_OWNED)
		goto map_shared;

	/*
	 * Tolerate double-sharing the same page, but this requires
	 * cross-checking the hypervisor stage-1.
	 */
	if (state != PKVM_PAGE_SHARED_OWNED) {
		ret = -EPERM;
		goto unlock;
	}

	ret = kvm_pgtable_get_leaf(&pkvm_pgtable, (u64)virt, &pte, NULL);
	if (ret)
		goto unlock;

	/*
	 * If the page has been shared with the hypervisor, it must be
	 * already mapped as SHARED_BORROWED in its stage-1.
	 * On a match, ret is already 0 here and we fall through to unlock.
	 */
	cur = kvm_pgtable_hyp_pte_prot(pte);
	prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
	if (!check_prot(cur, prot, ~prot))
		ret = -EPERM;
	goto unlock;

map_shared:
	/*
	 * If the page is not yet shared, adjust mappings in both page-tables
	 * while both locks are held.
	 */
	prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
	ret = pkvm_create_mappings_locked(virt, virt + PAGE_SIZE, prot);
	/* Must not fail with both locks held: there is no way to unwind. */
	BUG_ON(ret);

	prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
	ret = host_stage2_idmap_locked(addr, PAGE_SIZE, prot);
	/* Likewise: the hyp stage-1 mapping above cannot be rolled back. */
	BUG_ON(ret);

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu_fault_info fault;
@@ -708,3 +620,36 @@ static int do_share(struct pkvm_mem_share *share)

	return WARN_ON(__do_share(share));
}

/*
 * Share the page at @pfn between the host (initiator) and the hypervisor
 * (completer) by driving the generic do_share() machinery, with both
 * component locks held across the transaction.
 */
int __pkvm_host_share_hyp(u64 pfn)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	struct pkvm_mem_share share = {
		.tx	= {
			.nr_pages	= 1,
			.initiator	= {
				.id	= PKVM_ID_HOST,
				.addr	= phys,
				.host	= {
					.completer_addr = (u64)__hyp_va(phys),
				},
			},
			.completer	= {
				.id	= PKVM_ID_HYP,
			},
		},
		.completer_prot	= PAGE_HYP,
	};
	int ret;

	host_lock_component();
	hyp_lock_component();

	ret = do_share(&share);

	hyp_unlock_component();
	host_unlock_component();

	return ret;
}