Commit 2973073a authored by Christoph Hellwig, committed by Konrad Rzeszutek Wilk

swiotlb: remove the alloc_size parameter to swiotlb_tbl_unmap_single

Now that swiotlb remembers the allocation size, there is no need to pass
it back to swiotlb_tbl_unmap_single.
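
To illustrate (a sketch, not part of the patch text itself):
swiotlb_tbl_map_single() records the allocation size of every bounce
buffer slot in io_tlb_alloc_size[], so swiotlb_tbl_unmap_single() can
recover it from tlb_addr alone, and call sites shrink accordingly:

	/* before: callers passed the allocation size back by hand */
	swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);

	/* after: swiotlb looks it up from the bounce address, roughly:
	 *   index = (tlb_addr - offset - io_tlb_start) >> IO_TLB_SHIFT;
	 *   alloc_size = io_tlb_alloc_size[index];
	 */
	swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);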

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 9906aa5b
drivers/iommu/dma-iommu.c (+3 −8)
@@ -499,8 +499,6 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
 		unsigned long attrs)
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
-	struct iommu_dma_cookie *cookie = domain->iova_cookie;
-	struct iova_domain *iovad = &cookie->iovad;
 	phys_addr_t phys;
 
 	phys = iommu_iova_to_phys(domain, dma_addr);
@@ -510,8 +508,7 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
 	__iommu_dma_unmap(dev, dma_addr, size);
 
 	if (unlikely(is_swiotlb_buffer(phys)))
-		swiotlb_tbl_unmap_single(dev, phys, size,
-				iova_align(iovad, size), dir, attrs);
+		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
@@ -581,10 +578,8 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
 	}
 
 	iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
-	if ((iova == DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys))
-		swiotlb_tbl_unmap_single(dev, phys, org_size,
-				aligned_size, dir, attrs);
+	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys))
+		swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
 
 	return iova;
 }
 
drivers/xen/swiotlb-xen.c (+2 −2)
@@ -406,7 +406,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 * Ensure that the address returned is DMA'ble
 	 */
 	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
-		swiotlb_tbl_unmap_single(dev, map, size, size, dir,
+		swiotlb_tbl_unmap_single(dev, map, size, dir,
 				attrs | DMA_ATTR_SKIP_CPU_SYNC);
 		return DMA_MAPPING_ERROR;
 	}
@@ -445,7 +445,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(hwdev, dev_addr))
-		swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
+		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
 }
 
 static void
include/linux/swiotlb.h (+0 −1)
@@ -57,7 +57,6 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
 				     phys_addr_t tlb_addr,
 				     size_t mapping_size,
-				     size_t alloc_size,
 				     enum dma_data_direction dir,
 				     unsigned long attrs);
 
kernel/dma/direct.h (+1 −1)
@@ -114,6 +114,6 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
 		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
 
 	if (unlikely(is_swiotlb_buffer(phys)))
-		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
+		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
 }
 #endif /* _KERNEL_DMA_DIRECT_H */
kernel/dma/swiotlb.c (+23 −22)
@@ -102,7 +102,7 @@ static phys_addr_t *io_tlb_orig_addr;
 /*
  * The mapped buffer's size should be validated during a sync operation.
  */
-static size_t *io_tlb_orig_size;
+static size_t *io_tlb_alloc_size;
 
 /*
  * Protect the above data structures in the map and unmap calls
@@ -253,15 +253,15 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 		      __func__, alloc_size, PAGE_SIZE);
 
 	alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(size_t));
-	io_tlb_orig_size = memblock_alloc(alloc_size, PAGE_SIZE);
-	if (!io_tlb_orig_size)
+	io_tlb_alloc_size = memblock_alloc(alloc_size, PAGE_SIZE);
+	if (!io_tlb_alloc_size)
 		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
 		      __func__, alloc_size, PAGE_SIZE);
 
 	for (i = 0; i < io_tlb_nslabs; i++) {
 		io_tlb_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i);
 		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
-		io_tlb_orig_size[i] = 0;
+		io_tlb_alloc_size[i] = 0;
 	}
 	io_tlb_index = 0;
 	no_iotlb_memory = false;
@@ -393,18 +393,18 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	if (!io_tlb_orig_addr)
 		goto cleanup4;
 
-	io_tlb_orig_size = (size_t *)
+	io_tlb_alloc_size = (size_t *)
 		__get_free_pages(GFP_KERNEL,
 				 get_order(io_tlb_nslabs *
 					   sizeof(size_t)));
-	if (!io_tlb_orig_size)
+	if (!io_tlb_alloc_size)
 		goto cleanup5;
 
 
 	for (i = 0; i < io_tlb_nslabs; i++) {
 		io_tlb_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i);
 		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
-		io_tlb_orig_size[i] = 0;
+		io_tlb_alloc_size[i] = 0;
 	}
 	io_tlb_index = 0;
 	no_iotlb_memory = false;
@@ -436,7 +436,7 @@ void __init swiotlb_exit(void)
 		return;
 
 	if (late_alloc) {
-		free_pages((unsigned long)io_tlb_orig_size,
+		free_pages((unsigned long)io_tlb_alloc_size,
 			   get_order(io_tlb_nslabs * sizeof(size_t)));
 		free_pages((unsigned long)io_tlb_orig_addr,
 			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
@@ -447,7 +447,7 @@ void __init swiotlb_exit(void)
 	} else {
 		memblock_free_late(__pa(io_tlb_orig_addr),
 				   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
-		memblock_free_late(__pa(io_tlb_orig_size),
+		memblock_free_late(__pa(io_tlb_alloc_size),
 				   PAGE_ALIGN(io_tlb_nslabs * sizeof(size_t)));
 		memblock_free_late(__pa(io_tlb_list),
 				   PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
@@ -639,7 +639,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 	 */
 	for (i = 0; i < nr_slots(alloc_size + offset); i++) {
 		io_tlb_orig_addr[index + i] = slot_addr(orig_addr, i);
-		io_tlb_orig_size[index+i] = alloc_size - (i << IO_TLB_SHIFT);
+		io_tlb_alloc_size[index+i] = alloc_size - (i << IO_TLB_SHIFT);
 	}
 	tlb_addr = slot_addr(io_tlb_start, index) + offset;
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
@@ -648,14 +648,14 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 	return tlb_addr;
 }
 
-static void validate_sync_size_and_truncate(struct device *hwdev, size_t orig_size, size_t *size)
+static void validate_sync_size_and_truncate(struct device *hwdev, size_t alloc_size, size_t *size)
 {
-	if (*size > orig_size) {
+	if (*size > alloc_size) {
 		/* Warn and truncate mapping_size */
 		dev_WARN_ONCE(hwdev, 1,
 			"Attempt for buffer overflow. Original size: %zu. Mapping size: %zu.\n",
-			orig_size, *size);
-		*size = orig_size;
+			alloc_size, *size);
+		*size = alloc_size;
 	}
 }
 
@@ -663,16 +663,17 @@ static void validate_sync_size_and_truncate(struct device *hwdev, size_t orig_si
  * tlb_addr is the physical address of the bounce buffer to unmap.
  */
 void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
-			      size_t mapping_size, size_t alloc_size,
-			      enum dma_data_direction dir, unsigned long attrs)
+			      size_t mapping_size, enum dma_data_direction dir,
+			      unsigned long attrs)
 {
 	unsigned long flags;
 	unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr);
-	int i, count, nslots = nr_slots(alloc_size + offset);
 	int index = (tlb_addr - offset - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t orig_addr = io_tlb_orig_addr[index];
+	size_t alloc_size = io_tlb_alloc_size[index];
+	int i, count, nslots = nr_slots(alloc_size + offset);
 
-	validate_sync_size_and_truncate(hwdev, io_tlb_orig_size[index], &mapping_size);
+	validate_sync_size_and_truncate(hwdev, alloc_size, &mapping_size);
 
 	/*
 	 * First, sync the memory before unmapping the entry
@@ -701,7 +702,7 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
 	for (i = index + nslots - 1; i >= index; i--) {
 		io_tlb_list[i] = ++count;
 		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
-		io_tlb_orig_size[i] = 0;
+		io_tlb_alloc_size[i] = 0;
 	}
 
 	/*
@@ -721,13 +722,13 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
 			     enum dma_sync_target target)
 {
 	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
-	size_t orig_size = io_tlb_orig_size[index];
+	size_t alloc_size = io_tlb_alloc_size[index];
 	phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
 	if (orig_addr == INVALID_PHYS_ADDR)
 		return;
 
-	validate_sync_size_and_truncate(hwdev, orig_size, &size);
+	validate_sync_size_and_truncate(hwdev, alloc_size, &size);
 
 	switch (target) {
 	case SYNC_FOR_CPU:
@@ -770,7 +771,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
 	/* Ensure that the address returned is DMA'ble */
 	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
 	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
-		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, size, dir,
+		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
 			attrs | DMA_ATTR_SKIP_CPU_SYNC);
 		dev_WARN_ONCE(dev, 1,
 			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",