Commit 3ff8df14 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/kvm/book3s: Avoid using rmap to protect parallel page table update.

parent 7769a339
+9 −29
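
The change below drops the rmap-lock dance in the real-mode H_PUT_TCE_INDIRECT path: instead of looking up and locking the rmap entry for the page backing the TCE list, the handler now relies on kvm->mmu_lock (already taken a few lines further down) together with the MMU-notifier sequence check inside kvmppc_rm_ua_to_hpa() to detect a parallel page table update. A minimal sketch of that standard KVM pattern, for orientation only — mmu_notifier_retry() is the kvm_host.h helper of this era, and the surrounding code is schematic, not the exact code of this series:

	/* Snapshot the notifier sequence before walking anything. */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();	/* pairs with the barrier on the invalidate side */

	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		/*
		 * An invalidation ran between the snapshot and taking the
		 * lock, so the translation may be stale.  Real mode cannot
		 * wait; fail with H_TOO_HARD and let virtual mode redo it.
		 */
		ret = H_TOO_HARD;
	} else {
		/* mmu_lock held, sequence unchanged: the hpa is safe to use */
	}
	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);

Holding mmu_lock blocks the notifier's invalidate callbacks from completing, which is the same guarantee the removed lock_rmap()/unlock_rmap() pair used to provide for the single page backing the TCE list.
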
@@ -74,8 +74,8 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
EXPORT_SYMBOL_GPL(kvmppc_find_table);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
-		unsigned long *ua, unsigned long **prmap)
+static long kvmppc_rm_tce_to_ua(struct kvm *kvm,
+				unsigned long tce, unsigned long *ua)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;
@@ -87,9 +87,6 @@ static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

-	if (prmap)
-		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
-
	return 0;
}

@@ -116,7 +113,7 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

-	if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL))
+	if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -411,7 +408,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		return ret;

	dir = iommu_tce_direction(tce);
-	if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
+	if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;
@@ -488,7 +485,6 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
-	unsigned long *rmap = NULL;
	unsigned long mmu_seq;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;
@@ -530,7 +526,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		 */
		struct mm_iommu_table_group_mem_t *mem;

-		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
+		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
@@ -546,23 +542,9 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		 * We do not require memory to be preregistered in this case
		 * so lock rmap and do __find_linux_pte_or_hugepte().
		 */
-		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
-			return H_TOO_HARD;
-
-		rmap = (void *) vmalloc_to_phys(rmap);
-		if (WARN_ON_ONCE_RM(!rmap))
+		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
			return H_TOO_HARD;

-		/*
-		 * Synchronize with the MMU notifier callbacks in
-		 * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
-		 * While we have the rmap lock, code running on other CPUs
-		 * cannot finish unmapping the host real page that backs
-		 * this guest real page, so we are OK to access the host
-		 * real page.
-		 */
-		lock_rmap(rmap);
-
		arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
		if (kvmppc_rm_ua_to_hpa(vcpu, mmu_seq, ua, &tces)) {
			ret = H_TOO_HARD;
@@ -582,7 +564,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ua = 0;
-		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
+		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto invalidate_exit;
		}
@@ -607,9 +589,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		iommu_tce_kill_rm(stit->tbl, entry, npages);

unlock_exit:
-	if (rmap)
-		unlock_rmap(rmap);

	if (!prereg)
		arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
	return ret;
}
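
Two side notes on the surviving code, hedged since neither is spelled out in this commit: the raw arch_spin_lock() on &kvm->mmu_lock.rlock.raw_lock is the usual real-mode idiom, presumably because the regular spin_lock() wrappers (lockdep, tracing) are not safe to run in real mode; and H_TOO_HARD is never seen by the guest — it makes the hcall exit to the host, where the virtual-mode handler in book3s_64_vio.c repeats the work with full kernel services available. Schematically (the actual dispatch goes through the HV hcall exit path, not a direct call as shown):

	ret = kvmppc_rm_h_put_tce_indirect(vcpu, liobn, ioba, tce_list, npages);
	if (ret == H_TOO_HARD)
		/* redo in virtual mode, where sleeping and normal locks are allowed */
		ret = kvmppc_h_put_tce_indirect(vcpu, liobn, ioba, tce_list, npages);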