Commit 81655d3c authored by Jason Gunthorpe's avatar Jason Gunthorpe
Browse files

RDMA/mlx4: Use ib_umem_num_dma_blocks()

For the call sites that go through mlx4_ib_umem_calc_optimal_mtt_size(),
compute the count with ib_umem_num_dma_blocks() inside that function
instead; the value the callers passed in was just an odd static default.

All other places are just using it with PAGE_SIZE, switch to
ib_umem_num_dma_blocks().

As these were the last call sites, remove ib_umem_page_count().

Link: https://lore.kernel.org/r/15-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com


Signed-off-by: default avatarJason Gunthorpe <jgg@nvidia.com>
parent 87aebd3f
Loading
Loading
Loading
Loading
+0 −12
Original line number Diff line number Diff line
@@ -350,18 +350,6 @@ void ib_umem_release(struct ib_umem *umem)
}
EXPORT_SYMBOL(ib_umem_release);

/*
 * Count how many PAGE_SIZE blocks the umem's DMA-mapped scatterlist
 * covers, by summing sg_dma_len() over the mapped entries and shifting
 * by PAGE_SHIFT.  Deleted by this commit in favor of
 * ib_umem_num_dma_blocks(umem, PAGE_SIZE).
 *
 * NOTE(review): assumes every sg_dma_len() is a multiple of PAGE_SIZE,
 * otherwise the shift truncates a partial page — presumably guaranteed
 * by how the umem was mapped; confirm against ib_umem_get().
 */
int ib_umem_page_count(struct ib_umem *umem)
{
	int i, n = 0;
	struct scatterlist *sg;

	/* umem->nmap is presumably the number of DMA-mapped sg entries */
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
		n += sg_dma_len(sg) >> PAGE_SHIFT;

	return n;
}
EXPORT_SYMBOL(ib_umem_page_count);

/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
+0 −1
Original line number Diff line number Diff line
@@ -149,7 +149,6 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_udata *udata,
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	n = ib_umem_page_count(*umem);
	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);

+3 −2
Original line number Diff line number Diff line
@@ -271,6 +271,8 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
	u64 total_len = 0;
	int i;

	*num_of_mtts = ib_umem_num_dma_blocks(umem, PAGE_SIZE);

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i) {
		/*
		 * Initialization - save the first chunk start as the
@@ -421,7 +423,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);
	shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);

	err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
@@ -511,7 +512,7 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
			mmr->umem = NULL;
			goto release_mpt_entry;
		}
		n = ib_umem_page_count(mmr->umem);
		n = ib_umem_num_dma_blocks(mmr->umem, PAGE_SIZE);
		shift = PAGE_SHIFT;

		err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
+0 −2
Original line number Diff line number Diff line
@@ -922,7 +922,6 @@ static int create_rq(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
		goto err;
	}

	n = ib_umem_page_count(qp->umem);
	shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
	err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);

@@ -1117,7 +1116,6 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
			goto err;
		}

		n = ib_umem_page_count(qp->umem);
		shift = mlx4_ib_umem_calc_optimal_mtt_size(qp->umem, 0, &n);
		err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt);

+3 −2
Original line number Diff line number Diff line
@@ -115,7 +115,8 @@ int mlx4_ib_create_srq(struct ib_srq *ib_srq,
		if (IS_ERR(srq->umem))
			return PTR_ERR(srq->umem);

		err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
		err = mlx4_mtt_init(
			dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE),
			PAGE_SHIFT, &srq->mtt);
		if (err)
			goto err_buf;
Loading