Commit bd7b12aa authored by Linus Torvalds
Pull powerpc fixes from Michael Ellerman:
 "Fix our KVM reverse map real-mode handling since we enabled huge
  vmalloc (in some configurations).

  Revert a recent change to our IOMMU code which broke some devices.

 Fix KVM handling of FSCR on P7/P8, which could possibly have let a
  guest crash its Qemu.

  Fix kprobes validation of prefixed instructions across page boundary.

  Thanks to Alexey Kardashevskiy, Christophe Leroy, Fabiano Rosas,
  Frederic Barrat, Naveen N. Rao, and Nicholas Piggin"

* tag 'powerpc-5.13-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  Revert "powerpc/kernel/iommu: Align size for IOMMU_PAGE_SIZE() to save TCEs"
  KVM: PPC: Book3S HV: Save host FSCR in the P7/8 path
  powerpc: Fix reverse map real-mode address lookup with huge vmalloc
  powerpc/kprobes: Fix validation of prefixed instructions across page boundary
parents 773ac53b 59cc84c8
arch/powerpc/include/asm/pte-walk.h  +29 −0
@@ -31,6 +31,35 @@ static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
 	pgd_t *pgdir = init_mm.pgd;
 	return __find_linux_pte(pgdir, ea, NULL, hshift);
 }
+
+/*
+ * Convert a kernel vmap virtual address (vmalloc or ioremap space) to a
+ * physical address, without taking locks. This can be used in real-mode.
+ */
+static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
+{
+	pte_t *ptep;
+	phys_addr_t pa;
+	int hugepage_shift;
+
+	/*
+	 * init_mm does not free page tables, and does not do THP. It may
+	 * have huge pages from huge vmalloc / ioremap etc.
+	 */
+	ptep = find_init_mm_pte(addr, &hugepage_shift);
+	if (WARN_ON(!ptep))
+		return 0;
+
+	pa = PFN_PHYS(pte_pfn(*ptep));
+
+	if (!hugepage_shift)
+		hugepage_shift = PAGE_SHIFT;
+
+	pa |= addr & ((1ul << hugepage_shift) - 1);
+
+	return pa;
+}
+
 /*
  * This is what we should always use. Any other lockless page table lookup needs
  * careful audit against THP split.
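
The new helper's address math is easy to check in isolation. Below is a minimal userspace sketch that mirrors only the final arithmetic of ppc_find_vmap_phys(), with the page-table walk mocked out; MOCK_PAGE_SHIFT, vmap_phys, the sample address, and the PFN are illustrative assumptions, not values taken from this patch:

#include <stdio.h>
#include <stdint.h>

#define MOCK_PAGE_SHIFT	12	/* assumed 4K base pages, for illustration only */

/* Mirror of ppc_find_vmap_phys()'s arithmetic: the high bits come from the
 * PTE's pfn, the low hugepage_shift bits from the virtual address itself. */
static uint64_t vmap_phys(uint64_t addr, uint64_t pfn, int hugepage_shift)
{
	uint64_t pa = pfn << MOCK_PAGE_SHIFT;	/* stand-in for PFN_PHYS() */

	if (!hugepage_shift)			/* normal (non-huge) mapping */
		hugepage_shift = MOCK_PAGE_SHIFT;

	return pa | (addr & ((1ull << hugepage_shift) - 1));
}

int main(void)
{
	/* hypothetical vmalloc address backed by a 2MB huge page at 0x40000000 */
	uint64_t addr = 0xc008000000123456ull;
	uint64_t pfn = 0x40000000ull >> MOCK_PAGE_SHIFT;

	printf("pa = 0x%llx\n", (unsigned long long)vmap_phys(addr, pfn, 21));
	return 0;
}

With hugepage_shift = 21 the full 2MB in-page offset (0x123456) survives into the physical address. That is the case huge vmalloc introduced: the old io-workarounds lookup below, for one, WARN'd on any non-zero hugepage_shift.
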
arch/powerpc/kernel/eeh.c  +1 −22
@@ -346,28 +346,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
  */
 static inline unsigned long eeh_token_to_phys(unsigned long token)
 {
-	pte_t *ptep;
-	unsigned long pa;
-	int hugepage_shift;
-
-	/*
-	 * We won't find hugepages here(this is iomem). Hence we are not
-	 * worried about _PAGE_SPLITTING/collapse. Also we will not hit
-	 * page table free, because of init_mm.
-	 */
-	ptep = find_init_mm_pte(token, &hugepage_shift);
-	if (!ptep)
-		return token;
-
-	pa = pte_pfn(*ptep);
-
-	/* On radix we can do hugepage mappings for io, so handle that */
-	if (!hugepage_shift)
-		hugepage_shift = PAGE_SHIFT;
-
-	pa <<= PAGE_SHIFT;
-	pa |= token & ((1ul << hugepage_shift) - 1);
-	return pa;
+	return ppc_find_vmap_phys(token);
 }
 
 /*
arch/powerpc/kernel/io-workarounds.c  +3 −13
@@ -55,7 +55,6 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
 #ifdef CONFIG_PPC_INDIRECT_MMIO
 struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 {
-	unsigned hugepage_shift;
 	struct iowa_bus *bus;
 	int token;
 
@@ -65,22 +64,13 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 		bus = &iowa_busses[token - 1];
 	else {
 		unsigned long vaddr, paddr;
-		pte_t *ptep;
 
 		vaddr = (unsigned long)PCI_FIX_ADDR(addr);
 		if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
 			return NULL;
-		/*
-		 * We won't find huge pages here (iomem). Also can't hit
-		 * a page table free due to init_mm
-		 */
-		ptep = find_init_mm_pte(vaddr, &hugepage_shift);
-		if (ptep == NULL)
-			paddr = 0;
-		else {
-			WARN_ON(hugepage_shift);
-			paddr = pte_pfn(*ptep) << PAGE_SHIFT;
-		}
+
+		paddr = ppc_find_vmap_phys(vaddr);
+
 		bus = iowa_pci_find(vaddr, paddr);
 
 		if (bus == NULL)
arch/powerpc/kernel/iommu.c  +5 −6
@@ -898,7 +898,6 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	unsigned int order;
 	unsigned int nio_pages, io_order;
 	struct page *page;
-	size_t size_io = size;
 
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
@@ -925,9 +924,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	memset(ret, 0, size);
 
 	/* Set up tces to cover the allocated range */
-	size_io = IOMMU_PAGE_ALIGN(size_io, tbl);
-	nio_pages = size_io >> tbl->it_page_shift;
-	io_order = get_iommu_order(size_io, tbl);
+	nio_pages = size >> tbl->it_page_shift;
+	io_order = get_iommu_order(size, tbl);
 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
 			      mask >> tbl->it_page_shift, io_order, 0);
 	if (mapping == DMA_MAPPING_ERROR) {
@@ -942,9 +940,10 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 			 void *vaddr, dma_addr_t dma_handle)
 {
 	if (tbl) {
-		size_t size_io = IOMMU_PAGE_ALIGN(size, tbl);
-		unsigned int nio_pages = size_io >> tbl->it_page_shift;
+		unsigned int nio_pages;
 
+		size = PAGE_ALIGN(size);
+		nio_pages = size >> tbl->it_page_shift;
 		iommu_free(tbl, dma_handle, nio_pages);
 		size = PAGE_ALIGN(size);
 		free_pages((unsigned long)vaddr, get_order(size));
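
The revert changes how many TCEs cover a coherent allocation. Here is a standalone sketch of the two size computations; the 64K system page and 4K IOMMU page sizes are assumed for illustration, and names like ALIGN_UP and SYS_PAGE_SHIFT are not from the kernel:

#include <stdio.h>
#include <stddef.h>

#define SYS_PAGE_SHIFT	16			/* assumed 64K system pages */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t size = 4096;			/* hypothetical allocation size */
	unsigned int it_page_shift = 12;	/* assumed 4K IOMMU pages */

	/* reverted patch: align only to the IOMMU page size */
	size_t size_io = ALIGN_UP(size, (size_t)1 << it_page_shift);
	/* restored behaviour: align to the full system page size */
	size_t size_sys = ALIGN_UP(size, (size_t)1 << SYS_PAGE_SHIFT);

	printf("nio_pages: patched=%zu restored=%zu\n",
	       size_io >> it_page_shift, size_sys >> it_page_shift);
	/* prints: nio_pages: patched=1 restored=16 */
	return 0;
}

The reverted patch saved TCEs by mapping only the IOMMU-page-aligned size; the revert goes back to mapping every IOMMU page of the page-aligned allocation, which the affected devices evidently relied on.
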
arch/powerpc/kernel/kprobes.c  +2 −2
@@ -108,7 +108,6 @@ int arch_prepare_kprobe(struct kprobe *p)
 	int ret = 0;
 	struct kprobe *prev;
 	struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);
-	struct ppc_inst prefix = ppc_inst_read((struct ppc_inst *)(p->addr - 1));
 
 	if ((unsigned long)p->addr & 0x03) {
 		printk("Attempt to register kprobe at an unaligned address\n");
@@ -116,7 +115,8 @@ int arch_prepare_kprobe(struct kprobe *p)
 	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
 		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
 		ret = -EINVAL;
-	} else if (ppc_inst_prefixed(prefix)) {
+	} else if ((unsigned long)p->addr & ~PAGE_MASK &&
+		   ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)(p->addr - 1)))) {
 		printk("Cannot register a kprobe on the second word of prefixed instruction\n");
 		ret = -EINVAL;
 	}
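
The new guard only dereferences p->addr - 1 when the probe address is not at a page boundary. That is safe because a prefixed instruction cannot cross a 64-byte boundary (and therefore never a page boundary), so a page-aligned address can never be the suffix word of one, while reading the preceding word there could touch a different, possibly unmapped, page. A sketch of just that predicate, with a 64K page size and 64-bit addresses assumed and the names hypothetical:

#include <stdio.h>
#include <stdint.h>

#define MOCK_PAGE_SHIFT	16		/* assumed 64K pages */
#define MOCK_PAGE_MASK	(~(((uintptr_t)1 << MOCK_PAGE_SHIFT) - 1))

/* Only look at the previous instruction word (addr - 4) when it is
 * guaranteed to sit on the same page as the probe address. */
static int may_check_prev_word(uintptr_t addr)
{
	return (addr & ~MOCK_PAGE_MASK) != 0;
}

int main(void)
{
	printf("%d\n", may_check_prev_word(0xc000000000010000ull));	/* page start: 0 */
	printf("%d\n", may_check_prev_word(0xc000000000010004ull));	/* mid-page:   1 */
	return 0;
}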