Commit 7db0eea9 authored by Jason Gunthorpe
Browse files

RDMA/mlx5: Remove ncont from mlx5_ib_cont_pages()

This is the same as ib_umem_num_dma_blocks(umem, 1UL << page_shift), have
the callers compute it directly.

Link: https://lore.kernel.org/r/20201026131936.1335664-7-leon@kernel.org


Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 95741ee3
Loading
Loading
Loading
Loading
+16 −14
Original line number Diff line number Diff line
@@ -746,8 +746,9 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
	if (err)
		goto err_umem;

	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
			   &ncont);
	mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages,
			   &page_shift);
	ncont = ib_umem_num_dma_blocks(cq->buf.umem, 1UL << page_shift);
	mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
		    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);

@@ -1128,7 +1129,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
}

static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		       int entries, struct ib_udata *udata, int *npas,
		       int entries, struct ib_udata *udata,
		       int *page_shift, int *cqe_size)
{
	struct mlx5_ib_resize_cq ucmd;
@@ -1155,7 +1156,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		return err;
	}

	mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift, npas);
	mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift);

	cq->resize_umem = umem;
	*cqe_size = ucmd.cqe_size;
@@ -1276,21 +1277,22 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)

	mutex_lock(&cq->resize_mutex);
	if (udata) {
		err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
		err = resize_user(dev, cq, entries, udata, &page_shift,
				  &cqe_size);
		if (err)
			goto ex;
		npas = ib_umem_num_dma_blocks(cq->resize_umem, 1UL << page_shift);
	} else {
		struct mlx5_frag_buf *frag_buf;

		cqe_size = 64;
		err = resize_kernel(dev, cq, entries, cqe_size);
		if (!err) {
			struct mlx5_frag_buf *frag_buf = &cq->resize_buf->frag_buf;

		if (err)
			goto ex;
		frag_buf = &cq->resize_buf->frag_buf;
		npas = frag_buf->npages;
		page_shift = frag_buf->page_shift;
	}
	}

	if (err)
		goto ex;

	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
+7 −5
Original line number Diff line number Diff line
@@ -95,7 +95,6 @@ struct devx_umem {
	struct ib_umem			*umem;
	u32				page_offset;
	int				page_shift;
	int				ncont;
	u32				dinlen;
	u32				dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
};
@@ -2083,7 +2082,7 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,

	mlx5_ib_cont_pages(obj->umem, obj->umem->address,
			   MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
			   &obj->page_shift, &obj->ncont);
			   &obj->page_shift);

	if (!npages) {
		ib_umem_release(obj->umem);
@@ -2100,8 +2099,10 @@ static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
				   struct devx_umem *obj,
				   struct devx_umem_reg_cmd *cmd)
{
	cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
		    (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
	cmd->inlen =
		MLX5_ST_SZ_BYTES(create_umem_in) +
		(MLX5_ST_SZ_BYTES(mtt) *
		 ib_umem_num_dma_blocks(obj->umem, 1UL << obj->page_shift));
	cmd->in = uverbs_zalloc(attrs, cmd->inlen);
	return PTR_ERR_OR_ZERO(cmd->in);
}
@@ -2117,7 +2118,8 @@ static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
	MLX5_SET64(umem, umem, num_of_mtt,
		   ib_umem_num_dma_blocks(obj->umem, 1UL << obj->page_shift));
	MLX5_SET(umem, umem, log_page_size, obj->page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset, obj->page_offset);
+4 −11
Original line number Diff line number Diff line
@@ -41,12 +41,9 @@
 * @max_page_shift: high limit for page_shift - 0 means no limit
 * @count: number of PAGE_SIZE pages covered by umem
 * @shift: page shift for the compound pages found in the region
 * @ncont: number of compund pages
 */
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont)
			unsigned long max_page_shift, int *count, int *shift)
{
	unsigned long tmp;
	unsigned long m;
@@ -60,8 +57,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
		struct ib_umem_odp *odp = to_ib_umem_odp(umem);

		*shift = odp->page_shift;
		*ncont = ib_umem_odp_num_pages(odp);
		*count = *ncont << (*shift - PAGE_SHIFT);
		*count = ib_umem_num_pages(umem);
		return;
	}

@@ -90,13 +86,10 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
		i += len;
	}

	if (i) {
	if (i)
		m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);
		*ncont = DIV_ROUND_UP(i, (1 << m));
	} else {
	else
		m  = 0;
		*ncont = 0;
	}
	*shift = PAGE_SHIFT + m;
	*count = i;
}
+1 −2
Original line number Diff line number Diff line
@@ -1231,8 +1231,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
			unsigned long max_page_shift,
			int *count, int *shift,
			int *ncont);
			int *count, int *shift);
void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			    int page_shift, size_t offset, size_t num_pages,
			    __be64 *pas, int access_flags);
+2 −4
Original line number Diff line number Diff line
@@ -959,10 +959,9 @@ static struct mlx5_ib_mr *alloc_mr_from_cache(struct ib_pd *pd,
	struct mlx5_ib_mr *mr;
	int npages;
	int page_shift;
	int ncont;

	mlx5_ib_cont_pages(umem, iova, MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
			   &page_shift, &ncont);
			   &page_shift);
	ent = mr_cache_ent_from_order(dev, order_base_2(ib_umem_num_dma_blocks(
						   umem, 1UL << page_shift)));
	if (!ent)
@@ -1153,7 +1152,6 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
	int page_shift;
	__be64 *pas;
	int npages;
	int ncont;
	void *mkc;
	int inlen;
	u32 *in;
@@ -1165,7 +1163,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
		return ERR_PTR(-ENOMEM);

	mlx5_ib_cont_pages(umem, iova, MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
			   &page_shift, &ncont);
			   &page_shift);

	mr->page_shift = page_shift;
	mr->ibmr.pd = pd;
Loading