Commit 79fbd3e1 authored by Maor Gottlieb, committed by Jason Gunthorpe
Browse files

RDMA: Use the sg_table directly and remove the opencoded version from umem

This allows using the normal sg_table APIs and makes all the code
cleaner. Remove sgt, nents and nmap from ib_umem.

Link: https://lore.kernel.org/r/20210824142531.3877007-4-maorg@nvidia.com


Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 3e302dbc
Loading
Loading
Loading
Loading
+11 −21
Original line number Diff line number Diff line
@@ -51,11 +51,11 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
	struct scatterlist *sg;
	unsigned int i;

	if (umem->nmap > 0)
		ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
				DMA_BIDIRECTIONAL);
	if (dirty)
		ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt,
					   DMA_BIDIRECTIONAL, 0);

	for_each_sg(umem->sg_head.sgl, sg, umem->sg_nents, i)
	for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i)
		unpin_user_page_range_dirty_lock(sg_page(sg),
			DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);

@@ -111,7 +111,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
	/* offset into first SGL */
	pgoff = umem->address & ~PAGE_MASK;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
		/* Walk SGL and reduce max page size if VA/PA bits differ
		 * for any address.
		 */
@@ -121,7 +121,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
		 * the maximum possible page size as the low bits of the iova
		 * must be zero when starting the next chunk.
		 */
		if (i != (umem->nmap - 1))
		if (i != (umem->sgt_append.sgt.nents - 1))
			mask |= va;
		pgoff = 0;
	}
@@ -231,30 +231,19 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			&umem->sgt_append, page_list, pinned, 0,
			pinned << PAGE_SHIFT, ib_dma_max_seg_size(device),
			npages, GFP_KERNEL);
		umem->sg_nents = umem->sgt_append.sgt.nents;
		if (ret) {
			memcpy(&umem->sg_head.sgl, &umem->sgt_append.sgt,
			       sizeof(umem->sgt_append.sgt));
			unpin_user_pages_dirty_lock(page_list, pinned, 0);
			goto umem_release;
		}
	}

	memcpy(&umem->sg_head.sgl, &umem->sgt_append.sgt,
	       sizeof(umem->sgt_append.sgt));
	if (access & IB_ACCESS_RELAXED_ORDERING)
		dma_attr |= DMA_ATTR_WEAK_ORDERING;

	umem->nmap =
		ib_dma_map_sg_attrs(device, umem->sg_head.sgl, umem->sg_nents,
	ret = ib_dma_map_sgtable_attrs(device, &umem->sgt_append.sgt,
				       DMA_BIDIRECTIONAL, dma_attr);

	if (!umem->nmap) {
		ret = -ENOMEM;
	if (ret)
		goto umem_release;
	}

	ret = 0;
	goto out;

umem_release:
@@ -314,7 +303,8 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->sg_nents, dst, length,
	ret = sg_pcopy_to_buffer(umem->sgt_append.sgt.sgl,
				 umem->sgt_append.sgt.orig_nents, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
+2 −3
Original line number Diff line number Diff line
@@ -55,9 +55,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
		cur += sg_dma_len(sg);
	}

	umem_dmabuf->umem.sg_head.sgl = umem_dmabuf->first_sg;
	umem_dmabuf->umem.sg_head.nents = nmap;
	umem_dmabuf->umem.nmap = nmap;
	umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg;
	umem_dmabuf->umem.sgt_append.sgt.nents = nmap;
	umem_dmabuf->sgt = sgt;

wait_fence:
+2 −2
Original line number Diff line number Diff line
@@ -42,8 +42,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,

found:
	offset = virt - page_addr;
	db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
	db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
	db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset;
	db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset;
	db->u.user_page = page;
	refcount_inc(&page->refcount);

+1 −1
Original line number Diff line number Diff line
@@ -2235,7 +2235,7 @@ static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
	pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;

	if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
		iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);
		iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl);

	rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
		*pbl = rdma_block_iter_dma_address(&biter);
+2 −1
Original line number Diff line number Diff line
@@ -75,7 +75,8 @@ int mlx4_ib_db_map_user(struct ib_udata *udata, unsigned long virt,
	list_add(&page->list, &context->db_page_list);

found:
	db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);
	db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
		  (virt & ~PAGE_MASK);
	db->u.user_page = page;
	++page->refcnt;

Loading