Commit 36f7b2f3 authored by Claire Chang, committed by Konrad Rzeszutek Wilk
Browse files

swiotlb: Move alloc_size to swiotlb_find_slots



Rename find_slots to swiotlb_find_slots and move the maintenance of
alloc_size to it for better code reusability later.

Signed-off-by: Claire Chang <tientzu@chromium.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Stefano Stabellini <sstabellini@kernel.org>
Tested-by: Will Deacon <will@kernel.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 903cd0f3
Loading
Loading
Loading
Loading
+9 −8
Original line number Diff line number Diff line
@@ -430,7 +430,7 @@ static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
 * Find a suitable number of IO TLB entries size that will fit this request and
 * allocate a buffer from that IO TLB pool.
 */
static int find_slots(struct device *dev, phys_addr_t orig_addr,
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
			      size_t alloc_size)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
@@ -442,6 +442,7 @@ static int find_slots(struct device *dev, phys_addr_t orig_addr,
		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
	unsigned int nslots = nr_slots(alloc_size), stride;
	unsigned int index, wrap, count = 0, i;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned long flags;

	BUG_ON(!nslots);
@@ -486,8 +487,11 @@ static int find_slots(struct device *dev, phys_addr_t orig_addr,
	return -1;

found:
	for (i = index; i < index + nslots; i++)
	for (i = index; i < index + nslots; i++) {
		mem->slots[i].list = 0;
		mem->slots[i].alloc_size =
			alloc_size - (offset + ((i - index) << IO_TLB_SHIFT));
	}
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
	     mem->slots[i].list; i--)
@@ -528,7 +532,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	index = find_slots(dev, orig_addr, alloc_size + offset);
	index = swiotlb_find_slots(dev, orig_addr, alloc_size + offset);
	if (index == -1) {
		if (!(attrs & DMA_ATTR_NO_WARN))
			dev_warn_ratelimited(dev,
@@ -542,11 +546,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nr_slots(alloc_size + offset); i++) {
	for (i = 0; i < nr_slots(alloc_size + offset); i++)
		mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
		mem->slots[index + i].alloc_size =
			alloc_size - (i << IO_TLB_SHIFT);
	}
	tlb_addr = slot_addr(mem->start, index) + offset;
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))