Commit c8a02e38 authored by Aharon Landau, committed by Jason Gunthorpe

RDMA/mlx5: Clean UMR QP type flow from mlx5_ib_post_send()

No internal UMR operation uses mlx5_ib_post_send() any more, so remove the
UMR QP type logic from this function.

Link: https://lore.kernel.org/r/0b2f368f14bc9266ebdf92a601ca4e1e5b1e1188.1649747695.git.leonro@nvidia.com


Signed-off-by: Aharon Landau <aharonl@nvidia.com>
Reviewed-by: Michael Guralnik <michaelgur@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 636bdbfc
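
For context, the flags this patch deletes were driver-private work-request bits carved out of the window that enum ib_send_flags reserves for low-level drivers. A minimal standalone sketch of that pattern follows; the flag names are illustrative stand-ins, and the bit positions assume the mainline reserved window starting at bit 26:

	#include <stdio.h>

	#define IB_SEND_RESERVED_START	(1u << 26)	/* first driver-private bit */
	#define UMR_FAIL_IF_FREE	(IB_SEND_RESERVED_START << 2)
	#define UMR_UPDATE_XLT		(IB_SEND_RESERVED_START << 3)

	int main(void)
	{
		unsigned int send_flags = UMR_FAIL_IF_FREE | UMR_UPDATE_XLT;

		/* A post_send path would branch on the private bits like so */
		if (send_flags & UMR_UPDATE_XLT)
			printf("update XLT requested\n");
		return 0;
	}

With the UMR work moved to a dedicated internal path, these bits no longer need to squat in the shared ib_send_flags space.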
drivers/infiniband/hw/mlx5/mlx5_ib.h +1 −26
@@ -291,16 +291,9 @@ struct mlx5_ib_flow_db {
 };
 
 /* Use macros here so that don't have to duplicate
- * enum ib_send_flags and enum ib_qp_type for low-level driver
+ * enum ib_qp_type for low-level driver
  */
 
-#define MLX5_IB_SEND_UMR_ENABLE_MR	       (IB_SEND_RESERVED_START << 0)
-#define MLX5_IB_SEND_UMR_DISABLE_MR	       (IB_SEND_RESERVED_START << 1)
-#define MLX5_IB_SEND_UMR_FAIL_IF_FREE	       (IB_SEND_RESERVED_START << 2)
-#define MLX5_IB_SEND_UMR_UPDATE_XLT	       (IB_SEND_RESERVED_START << 3)
-#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 4)
-#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS       IB_SEND_RESERVED_END
-
 #define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
 /*
  * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
@@ -536,24 +529,6 @@ struct mlx5_ib_cq_buf {
 	int			nent;
 };
 
-struct mlx5_umr_wr {
-	struct ib_send_wr		wr;
-	u64				virt_addr;
-	u64				offset;
-	struct ib_pd		       *pd;
-	unsigned int			page_shift;
-	unsigned int			xlt_size;
-	u64				length;
-	int				access_flags;
-	u32				mkey;
-	u8				ignore_free_state:1;
-};
-
-static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
-{
-	return container_of(wr, struct mlx5_umr_wr, wr);
-}
-
 enum mlx5_ib_cq_pr_flags {
 	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
 	MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS = 1 << 1,
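
The umr_wr() helper removed above relied on the standard container_of() embedding idiom: the generic struct ib_send_wr sits as the first member of the driver-private request, and the driver recovers the outer struct from the pointer the verbs core hands back. A small standalone sketch of the idiom, with the types reduced to the minimum (not the kernel definitions):

	#include <stddef.h>
	#include <stdio.h>

	/* Userspace stand-in for the kernel's container_of() */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct ib_send_wr { int opcode; };

	struct umr_wr_demo {
		struct ib_send_wr wr;	/* generic WR embedded first */
		unsigned int mkey;	/* driver-private payload */
	};

	int main(void)
	{
		struct umr_wr_demo umrwr = { .wr = { .opcode = 7 }, .mkey = 0x42 };
		struct ib_send_wr *wr = &umrwr.wr;	/* what the core passes around */

		/* Downcast back to the private request, as umr_wr() did */
		struct umr_wr_demo *back = container_of(wr, struct umr_wr_demo, wr);

		printf("mkey 0x%x\n", back->mkey);	/* prints mkey 0x42 */
		return 0;
	}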
drivers/infiniband/hw/mlx5/umr.c +0 −43
@@ -94,49 +94,6 @@ static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
 	return 0;
 }
 
-int mlx5r_umr_set_umr_ctrl_seg(struct mlx5_ib_dev *dev,
-			       struct mlx5_wqe_umr_ctrl_seg *umr,
-			       const struct ib_send_wr *wr)
-{
-	const struct mlx5_umr_wr *umrwr = umr_wr(wr);
-
-	memset(umr, 0, sizeof(*umr));
-
-	if (!umrwr->ignore_free_state) {
-		if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
-			 /* fail if free */
-			umr->flags = MLX5_UMR_CHECK_FREE;
-		else
-			/* fail if not free */
-			umr->flags = MLX5_UMR_CHECK_NOT_FREE;
-	}
-
-	umr->xlt_octowords =
-		cpu_to_be16(mlx5r_umr_get_xlt_octo(umrwr->xlt_size));
-	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
-		u64 offset = mlx5r_umr_get_xlt_octo(umrwr->offset);
-
-		umr->xlt_offset = cpu_to_be16(offset & 0xffff);
-		umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
-		umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
-	}
-	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
-		umr->mkey_mask |= get_umr_update_translation_mask();
-	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
-		umr->mkey_mask |= get_umr_update_access_mask(dev);
-		umr->mkey_mask |= get_umr_update_pd_mask();
-	}
-	if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
-		umr->mkey_mask |= get_umr_enable_mr_mask();
-	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
-		umr->mkey_mask |= get_umr_disable_mr_mask();
-
-	if (!wr->num_sge)
-		umr->flags |= MLX5_UMR_INLINE;
-
-	return umr_check_mkey_mask(dev, be64_to_cpu(umr->mkey_mask));
-}
-
 enum {
 	MAX_UMR_WR = 128,
 };
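
One detail worth noting from the deleted mlx5r_umr_set_umr_ctrl_seg(): the XLT offset is a 48-bit quantity split across two big-endian WQE fields, bits 15..0 in xlt_offset and bits 47..16 in xlt_offset_47_16. A standalone sketch of just that split (plain host-endian fields here; the kernel code applies cpu_to_be16()/cpu_to_be32() on top):

	#include <stdint.h>
	#include <stdio.h>

	struct xlt_offset_fields {
		uint16_t xlt_offset;		/* bits 15..0 of the octoword offset */
		uint32_t xlt_offset_47_16;	/* bits 47..16 */
	};

	static void split_xlt_offset(struct xlt_offset_fields *f, uint64_t offset)
	{
		f->xlt_offset = (uint16_t)(offset & 0xffff);
		f->xlt_offset_47_16 = (uint32_t)(offset >> 16);
	}

	int main(void)
	{
		struct xlt_offset_fields f;

		split_xlt_offset(&f, 0x123456789aULL);
		/* prints low16=0x789a high32=0x12345678 */
		printf("low16=0x%04x high32=0x%08x\n", f.xlt_offset, f.xlt_offset_47_16);
		return 0;
	}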
drivers/infiniband/hw/mlx5/umr.h +0 −4
@@ -75,10 +75,6 @@ static inline u64 mlx5r_umr_get_xlt_octo(u64 bytes)
 	       MLX5_IB_UMR_OCTOWORD;
 }
 
-int mlx5r_umr_set_umr_ctrl_seg(struct mlx5_ib_dev *dev,
-			       struct mlx5_wqe_umr_ctrl_seg *umr,
-			       const struct ib_send_wr *wr);
-
 struct mlx5r_umr_context {
 	struct ib_cqe cqe;
 	enum ib_wc_status status;
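
The mlx5r_umr_get_xlt_octo() helper visible in context above sizes translation tables in 16-byte octowords, rounded up to the UMR alignment. A worked standalone version, assuming the driver's usual constants (MLX5_IB_UMR_OCTOWORD = 16, MLX5_IB_UMR_XLT_ALIGNMENT = 64):

	#include <stdint.h>
	#include <stdio.h>

	#define UMR_OCTOWORD		16	/* assumed MLX5_IB_UMR_OCTOWORD */
	#define UMR_XLT_ALIGNMENT	64	/* assumed MLX5_IB_UMR_XLT_ALIGNMENT */

	/* Round up to the alignment, then count 16-byte octowords */
	static uint64_t get_xlt_octo(uint64_t bytes)
	{
		uint64_t aligned = (bytes + UMR_XLT_ALIGNMENT - 1) &
				   ~(uint64_t)(UMR_XLT_ALIGNMENT - 1);

		return aligned / UMR_OCTOWORD;
	}

	int main(void)
	{
		/* 100 bytes of XLT round up to 128, i.e. 8 octowords */
		printf("%llu\n", (unsigned long long)get_xlt_octo(100));
		return 0;
	}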
drivers/infiniband/hw/mlx5/wr.c +0 −72
@@ -214,43 +214,6 @@ static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
 	seg->status = MLX5_MKEY_STATUS_FREE;
 }
 
-static void set_reg_mkey_segment(struct mlx5_ib_dev *dev,
-				 struct mlx5_mkey_seg *seg,
-				 const struct ib_send_wr *wr)
-{
-	const struct mlx5_umr_wr *umrwr = umr_wr(wr);
-
-	memset(seg, 0, sizeof(*seg));
-	if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
-		MLX5_SET(mkc, seg, free, 1);
-
-	MLX5_SET(mkc, seg, a,
-		 !!(umrwr->access_flags & IB_ACCESS_REMOTE_ATOMIC));
-	MLX5_SET(mkc, seg, rw,
-		 !!(umrwr->access_flags & IB_ACCESS_REMOTE_WRITE));
-	MLX5_SET(mkc, seg, rr, !!(umrwr->access_flags & IB_ACCESS_REMOTE_READ));
-	MLX5_SET(mkc, seg, lw, !!(umrwr->access_flags & IB_ACCESS_LOCAL_WRITE));
-	MLX5_SET(mkc, seg, lr, 1);
-	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
-		MLX5_SET(mkc, seg, relaxed_ordering_write,
-			 !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING));
-	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
-		MLX5_SET(mkc, seg, relaxed_ordering_read,
-			 !!(umrwr->access_flags & IB_ACCESS_RELAXED_ORDERING));
-
-	if (umrwr->pd)
-		MLX5_SET(mkc, seg, pd, to_mpd(umrwr->pd)->pdn);
-	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
-	    !umrwr->length)
-		MLX5_SET(mkc, seg, length64, 1);
-
-	MLX5_SET64(mkc, seg, start_addr, umrwr->virt_addr);
-	MLX5_SET64(mkc, seg, len, umrwr->length);
-	MLX5_SET(mkc, seg, log_page_size, umrwr->page_shift);
-	MLX5_SET(mkc, seg, qpn, 0xffffff);
-	MLX5_SET(mkc, seg, mkey_7_0, mlx5_mkey_variant(umrwr->mkey));
-}
-
 static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
 			     struct mlx5_ib_mr *mr,
 			     struct mlx5_ib_pd *pd)
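
The deleted set_reg_mkey_segment() is built almost entirely from MLX5_SET()/MLX5_SET64(), the accessors that pack named fields into the big-endian mkey context using layouts from mlx5_ifc.h. A much-simplified, hypothetical stand-in showing the idea with one hand-rolled field descriptor (the real macros derive word offsets, shifts, and widths from the PRM-generated layouts):

	#include <stdint.h>
	#include <stdio.h>

	/* Toy field descriptor: 32-bit word index, bit shift, and mask */
	struct field { int word; int shift; uint32_t mask; };

	/* Illustrative stand-in for one mkey-context bit, e.g. local write */
	static const struct field FLD_LW = { .word = 1, .shift = 10, .mask = 0x1 };

	static void set_field(uint32_t *ctx, const struct field *f, uint32_t val)
	{
		ctx[f->word] &= ~(f->mask << f->shift);
		ctx[f->word] |= (val & f->mask) << f->shift;
	}

	int main(void)
	{
		uint32_t mkc[16] = { 0 };

		set_field(mkc, &FLD_LW, 1);	/* enable the bit */
		printf("word1 = 0x%08x\n", mkc[1]);	/* prints 0x00000400 */
		return 0;
	}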
@@ -1059,35 +1022,6 @@ static void handle_qpt_ud(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
 	}
 }
 
-static int handle_qpt_reg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
-			      const struct ib_send_wr *wr,
-			      struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
-			      int *size, void **cur_edge, unsigned int idx)
-{
-	int err = 0;
-
-	if (unlikely(wr->opcode != MLX5_IB_WR_UMR)) {
-		err = -EINVAL;
-		mlx5_ib_warn(dev, "bad opcode %d\n", wr->opcode);
-		goto out;
-	}
-
-	qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
-	(*ctrl)->imm = cpu_to_be32(umr_wr(wr)->mkey);
-	err = mlx5r_umr_set_umr_ctrl_seg(dev, *seg, wr);
-	if (unlikely(err))
-		goto out;
-	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
-	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
-	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-	set_reg_mkey_segment(dev, *seg, wr);
-	*seg += sizeof(struct mlx5_mkey_seg);
-	*size += sizeof(struct mlx5_mkey_seg) / 16;
-	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
-out:
-	return err;
-}
-
 void mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq,
 		   struct mlx5_wqe_ctrl_seg *ctrl)
 {
@@ -1220,12 +1154,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 		case IB_QPT_UD:
 			handle_qpt_ud(qp, wr, &seg, &size, &cur_edge);
 			break;
-		case MLX5_IB_QPT_REG_UMR:
-			err = handle_qpt_reg_umr(dev, qp, wr, &ctrl, &seg,
-						       &size, &cur_edge, idx);
-			if (unlikely(err))
-				goto out;
-			break;
 
 		default:
 			break;
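
The deleted handle_qpt_reg_umr() also illustrates the send-path convention used throughout wr.c: *seg walks the work-queue buffer one segment at a time while *size accumulates the WQE length in 16-byte units, the granularity mlx5 hardware counts in. A reduced sketch of that bookkeeping; the segment sizes match sizeof(struct mlx5_wqe_umr_ctrl_seg) = 48 and sizeof(struct mlx5_mkey_seg) = 64 in mainline, but treat the exact figures as assumptions, and note that the queue wrap-around handled by handle_post_send_edge() is omitted:

	#include <stdint.h>
	#include <stdio.h>

	#define UMR_CTRL_SEG_BYTES	48	/* assumed sizeof(struct mlx5_wqe_umr_ctrl_seg) */
	#define MKEY_SEG_BYTES		64	/* assumed sizeof(struct mlx5_mkey_seg) */

	/* Advance the segment cursor and count the WQE size in 16-byte units */
	static void advance_seg(void **seg, int *size, int seg_bytes)
	{
		*seg = (char *)*seg + seg_bytes;
		*size += seg_bytes / 16;
	}

	int main(void)
	{
		uint8_t wqe[256] = { 0 };
		void *seg = wqe;
		int size = 0;

		advance_seg(&seg, &size, UMR_CTRL_SEG_BYTES);	/* UMR ctrl segment */
		advance_seg(&seg, &size, MKEY_SEG_BYTES);	/* mkey segment */
		printf("WQE payload: %d x 16B\n", size);	/* 3 + 4 = 7 */
		return 0;
	}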