Commit 5206548f authored by Linus Torvalds
Browse files
Pull powerpc fixes from Michael Ellerman:

 - Partly revert a change to our timer_interrupt() that caused lockups
   with high res timers disabled.

 - Fix a bug in KVM TCE handling that could corrupt kernel memory.

 - Two commits fixing Power9/Power10 perf alternative event selection.

Thanks to Alexey Kardashevskiy, Athira Rajeev, David Gibson, Frederic
Barrat, Madhavan Srinivasan, Miguel Ojeda, and Nicholas Piggin.

* tag 'powerpc-5.18-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/perf: Fix 32bit compile
  powerpc/perf: Fix power10 event alternatives
  powerpc/perf: Fix power9 event alternatives
  KVM: PPC: Fix TCE handling for VFIO
  powerpc/time: Always set decrementer in timer_interrupt()
parents f48ffef1 bb82c574
Loading
Loading
Loading
Loading
+14 −15
Original line number Diff line number Diff line
@@ -615,8 +615,6 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
		return;
	}

	/* Conditionally hard-enable interrupts. */
	if (should_hard_irq_enable()) {
	/*
	 * Ensure a positive value is written to the decrementer, or
	 * else some CPUs will continue to take decrementer exceptions.
@@ -630,8 +628,9 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
	else
		set_dec(decrementer_max);

	/* Conditionally hard-enable interrupts. */
	if (should_hard_irq_enable())
		do_hard_irq_enable();
	}

#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
	if (atomic_read(&ppc_n_lost_interrupts) != 0)
+23 −22
Original line number Diff line number Diff line
@@ -420,13 +420,19 @@ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
	tbl[idx % TCES_PER_PAGE] = tce;
}

static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry)
static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
		struct iommu_table *tbl, unsigned long entry)
{
	unsigned long i;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);

	for (i = 0; i < subpages; ++i) {
		unsigned long hpa = 0;
		enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
		iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
	}
}

static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -485,6 +491,8 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}

@@ -544,6 +552,8 @@ static long kvmppc_tce_iommu_map(struct kvm *kvm,
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}

@@ -590,10 +600,9 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
					entry, ua, dir);

		iommu_tce_kill(stit->tbl, entry, 1);

		if (ret != H_SUCCESS) {
			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
			goto unlock_exit;
		}
	}
@@ -669,13 +678,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		 */
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto invalidate_exit;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto invalidate_exit;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -684,19 +693,15 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
						entry);
				goto invalidate_exit;
				kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
						 entry + i);
				goto unlock_exit;
			}
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill(stit->tbl, entry, npages);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

@@ -735,20 +740,16 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
				continue;

			if (ret == H_TOO_HARD)
				goto invalidate_exit;
				return ret;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
+22 −22
Original line number Diff line number Diff line
@@ -247,13 +247,19 @@ static void iommu_tce_kill_rm(struct iommu_table *tbl,
		tbl->it_ops->tce_kill(tbl, entry, pages, true);
}

static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry)
static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
		struct iommu_table *tbl, unsigned long entry)
{
	unsigned long i;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);

	for (i = 0; i < subpages; ++i) {
		unsigned long hpa = 0;
		enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
		iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
	}
}

static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -316,6 +322,8 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
			break;
	}

	iommu_tce_kill_rm(tbl, io_entry, subpages);

	return ret;
}

@@ -379,6 +387,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
			break;
	}

	iommu_tce_kill_rm(tbl, io_entry, subpages);

	return ret;
}

@@ -420,10 +430,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry, ua, dir);

		iommu_tce_kill_rm(stit->tbl, entry, 1);

		if (ret != H_SUCCESS) {
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
			kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
			return ret;
		}
	}
@@ -561,7 +569,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		ua = 0;
		if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto invalidate_exit;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -570,19 +578,15 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
						entry);
				goto invalidate_exit;
				kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
						entry + i);
				goto unlock_exit;
			}
		}

		kvmppc_rm_tce_put(stt, entry + i, tce);
	}

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill_rm(stit->tbl, entry, npages);

unlock_exit:
	if (!prereg)
		arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
@@ -620,20 +624,16 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
				continue;

			if (ret == H_TOO_HARD)
				goto invalidate_exit;
				return ret;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
			kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);

	return ret;
}

+2 −2
Original line number Diff line number Diff line
@@ -3,11 +3,11 @@
obj-y				+= callchain.o callchain_$(BITS).o perf_regs.o
obj-$(CONFIG_COMPAT)		+= callchain_32.o

obj-$(CONFIG_PPC_PERF_CTRS)	+= core-book3s.o bhrb.o
obj-$(CONFIG_PPC_PERF_CTRS)	+= core-book3s.o
obj64-$(CONFIG_PPC_PERF_CTRS)	+= ppc970-pmu.o power5-pmu.o \
				   power5+-pmu.o power6-pmu.o power7-pmu.o \
				   isa207-common.o power8-pmu.o power9-pmu.o \
				   generic-compat-pmu.o power10-pmu.o
				   generic-compat-pmu.o power10-pmu.o bhrb.o
obj32-$(CONFIG_PPC_PERF_CTRS)	+= mpc7450-pmu.o

obj-$(CONFIG_PPC_POWERNV)	+= imc-pmu.o
+1 −1
Original line number Diff line number Diff line
@@ -91,8 +91,8 @@ extern u64 PERF_REG_EXTENDED_MASK;

/* Table of alternatives, sorted by column 0 */
static const unsigned int power10_event_alternatives[][MAX_ALT] = {
	{ PM_CYC_ALT,			PM_CYC },
	{ PM_INST_CMPL_ALT,		PM_INST_CMPL },
	{ PM_CYC_ALT,			PM_CYC },
};

static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[])
Loading