Commit 3902b429 authored by Bob Pearson's avatar Bob Pearson Committed by Jason Gunthorpe
Browse files

RDMA/rxe: Implement invalidate MW operations

Implement invalidate MW and clean up the invalidate MR operations.

Added code to perform remote invalidate for send with invalidate.  Added
code to perform local invalidation. Deleted some blank lines in rxe_loc.h.

Link: https://lore.kernel.org/r/20210608042552.33275-9-rpearsonhpe@gmail.com


Signed-off-by: default avatarBob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: default avatarJason Gunthorpe <jgg@nvidia.com>
parent 32a577b4
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -349,7 +349,7 @@ static inline enum comp_state do_read(struct rxe_qp *qp,

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, payload_addr(pkt),
			payload_size(pkt), to_mr_obj, NULL);
			payload_size(pkt), RXE_TO_MR_OBJ, NULL);
	if (ret)
		return COMPST_ERROR;

@@ -369,7 +369,7 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, &atomic_orig,
			sizeof(u64), to_mr_obj, NULL);
			sizeof(u64), RXE_TO_MR_OBJ, NULL);
	if (ret)
		return COMPST_ERROR;
	else
+6 −23
Original line number Diff line number Diff line
@@ -71,46 +71,29 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev, u32 size,
int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

/* rxe_mr.c */
enum copy_direction {
	to_mr_obj,
	from_mr_obj,
};

u8 rxe_get_next_key(u32 last_key);
void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);

int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
		     int access, struct rxe_mr *mr);

int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);

int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
		enum copy_direction dir, u32 *crcp);

		enum rxe_mr_copy_dir dir, u32 *crcp);
int copy_data(struct rxe_pd *pd, int access,
	      struct rxe_dma_info *dma, void *addr, int length,
	      enum copy_direction dir, u32 *crcp);

	      enum rxe_mr_copy_dir dir, u32 *crcp);
void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);

enum lookup_type {
	lookup_local,
	lookup_remote,
};

struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
			 enum lookup_type type);

			 enum rxe_mr_lookup_type type);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);

void rxe_mr_cleanup(struct rxe_pool_entry *arg);

int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey);
void rxe_mr_cleanup(struct rxe_pool_entry *arg);

/* rxe_mw.c */
int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
int rxe_dealloc_mw(struct ib_mw *ibmw);
int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey);
void rxe_mw_cleanup(struct rxe_pool_entry *arg);

/* rxe_net.c */
+55 −26
Original line number Diff line number Diff line
@@ -55,21 +55,6 @@ static void rxe_mr_init(int access, struct rxe_mr *mr)
	mr->map_shift = ilog2(RXE_BUF_PER_MAP);
}

/* pool cleanup callback for MR objects: release the pinned user
 * memory (if any) and free the two-level page map. Called when the
 * last reference to the MR's pool entry is dropped.
 * NOTE(review): ib_umem_release() tolerates a NULL umem, so DMA-type
 * MRs without a umem are handled without a guard.
 */
void rxe_mr_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
	int i;

	ib_umem_release(mr->umem);

	/* mr->map is a table of num_map separately allocated map chunks */
	if (mr->map) {
		for (i = 0; i < mr->num_map; i++)
			kfree(mr->map[i]);

		kfree(mr->map);
	}
}

static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
{
	int i;
@@ -298,7 +283,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
 * crc32 if crcp is not zero. caller must hold a reference to mr
 */
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
		enum copy_direction dir, u32 *crcp)
		enum rxe_mr_copy_dir dir, u32 *crcp)
{
	int			err;
	int			bytes;
@@ -316,9 +301,9 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
	if (mr->type == RXE_MR_TYPE_DMA) {
		u8 *src, *dest;

		src = (dir == to_mr_obj) ? addr : ((void *)(uintptr_t)iova);
		src = (dir == RXE_TO_MR_OBJ) ? addr : ((void *)(uintptr_t)iova);

		dest = (dir == to_mr_obj) ? ((void *)(uintptr_t)iova) : addr;
		dest = (dir == RXE_TO_MR_OBJ) ? ((void *)(uintptr_t)iova) : addr;

		memcpy(dest, src, length);

@@ -346,8 +331,8 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
		u8 *src, *dest;

		va	= (u8 *)(uintptr_t)buf->addr + offset;
		src = (dir == to_mr_obj) ? addr : va;
		dest = (dir == to_mr_obj) ? va : addr;
		src = (dir == RXE_TO_MR_OBJ) ? addr : va;
		dest = (dir == RXE_TO_MR_OBJ) ? va : addr;

		bytes	= buf->size - offset;

@@ -392,7 +377,7 @@ int copy_data(
	struct rxe_dma_info	*dma,
	void			*addr,
	int			length,
	enum copy_direction	dir,
	enum rxe_mr_copy_dir	dir,
	u32			*crcp)
{
	int			bytes;
@@ -412,7 +397,7 @@ int copy_data(
	}

	if (sge->length && (offset < sge->length)) {
		mr = lookup_mr(pd, access, sge->lkey, lookup_local);
		mr = lookup_mr(pd, access, sge->lkey, RXE_LOOKUP_LOCAL);
		if (!mr) {
			err = -EINVAL;
			goto err1;
@@ -438,7 +423,7 @@ int copy_data(

			if (sge->length) {
				mr = lookup_mr(pd, access, sge->lkey,
					       lookup_local);
					       RXE_LOOKUP_LOCAL);
				if (!mr) {
					err = -EINVAL;
					goto err1;
@@ -520,7 +505,7 @@ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
 * (4) verify that mr state is valid
 */
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
			 enum lookup_type type)
			 enum rxe_mr_lookup_type type)
{
	struct rxe_mr *mr;
	struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
@@ -530,8 +515,8 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
	if (!mr)
		return NULL;

	if (unlikely((type == lookup_local && mr_lkey(mr) != key) ||
		     (type == lookup_remote && mr_rkey(mr) != key) ||
	if (unlikely((type == RXE_LOOKUP_LOCAL && mr_lkey(mr) != key) ||
		     (type == RXE_LOOKUP_REMOTE && mr_rkey(mr) != key) ||
		     mr_pd(mr) != pd || (access && !(access & mr->access)) ||
		     mr->state != RXE_MR_STATE_VALID)) {
		rxe_drop_ref(mr);
@@ -540,3 +525,47 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,

	return mr;
}

/* rxe_invalidate_mr - invalidate an MR named by @rkey
 * @qp:   queue pair the (local or remote) invalidate arrived on
 * @rkey: the rkey identifying the MR
 *
 * Looks the MR up by pool index (the rkey with its low 8 variant
 * bits stripped), verifies the full rkey matches, then moves the
 * MR to the FREE state so further accesses through it fail until
 * it is registered again.
 *
 * Returns 0 on success, -EINVAL if the MR cannot be found or the
 * rkey does not match.
 */
int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_mr *mr;
	int ret;

	/* low 8 bits of the key are the variant; the index is above them */
	mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
	if (!mr) {
		pr_err("%s: No MR for rkey %#x\n", __func__, rkey);
		ret = -EINVAL;
		goto err;
	}

	/* the index matched but the full key must match too */
	if (rkey != mr->ibmr.rkey) {
		pr_err("%s: rkey (%#x) doesn't match mr->ibmr.rkey (%#x)\n",
			__func__, rkey, mr->ibmr.rkey);
		ret = -EINVAL;
		goto err_drop_ref;
	}

	mr->state = RXE_MR_STATE_FREE;
	ret = 0;

err_drop_ref:
	rxe_drop_ref(mr);	/* pairs with rxe_pool_get_index() */
err:
	return ret;
}

/* Pool cleanup callback for MR objects. Releases the pinned user
 * memory (ib_umem_release() accepts NULL) and then frees each map
 * chunk followed by the chunk table itself.
 */
void rxe_mr_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
	int i;

	ib_umem_release(mr->umem);

	if (!mr->map)
		return;

	for (i = 0; i < mr->num_map; i++)
		kfree(mr->map[i]);

	kfree(mr->map);
}
+67 −0
Original line number Diff line number Diff line
@@ -245,6 +245,73 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
	return ret;
}

/* Verify that @mw may be invalidated: it must not already be in the
 * INVALID state and must be a type 2 window — type 1 windows may not
 * be invalidated (IBA o10-37.2.26).
 * Returns 0 if the invalidate may proceed, -EINVAL otherwise.
 */
static int rxe_check_invalidate_mw(struct rxe_qp *qp, struct rxe_mw *mw)
{
	/* o10-37.2.26 forbids invalidating type 1 windows */
	if (unlikely(mw->state == RXE_MW_STATE_INVALID ||
		     mw->ibmw.type == IB_MW_TYPE_1))
		return -EINVAL;

	return 0;
}

/* Actually invalidate a (checked, locked) type 2 MW: sever its QP
 * and MR associations, dropping the references taken at bind time,
 * clear the binding fields, and move it to the FREE state.
 * Caller must hold mw->lock and have validated the MW with
 * rxe_check_invalidate_mw().
 */
static void rxe_do_invalidate_mw(struct rxe_mw *mw)
{
	struct rxe_qp *qp;
	struct rxe_mr *mr;

	/* valid type 2 MW will always have a QP pointer */
	qp = mw->qp;
	mw->qp = NULL;
	rxe_drop_ref(qp);

	/* valid type 2 MW will always have an MR pointer */
	mr = mw->mr;
	mw->mr = NULL;
	atomic_dec(&mr->num_mw);	/* MR no longer has this window bound */
	rxe_drop_ref(mr);

	/* reset the binding so the window can be bound again */
	mw->access = 0;
	mw->addr = 0;
	mw->length = 0;
	mw->state = RXE_MW_STATE_FREE;
}

/* rxe_invalidate_mw - invalidate a memory window named by @rkey
 * @qp:   queue pair the (local or remote) invalidate arrived on
 * @rkey: the rkey identifying the MW
 *
 * Looks the MW up by pool index, verifies the full rkey matches,
 * then, under mw->lock, checks that the invalidate is permitted
 * (type 2 MW, currently bound) and performs it.
 *
 * Returns 0 on success or -EINVAL on any failure.
 */
int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	unsigned long flags;
	struct rxe_mw *mw;
	int ret;

	/* low 8 bits of the key are the variant; the index is above them */
	mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
	if (!mw) {
		ret = -EINVAL;
		goto err;
	}

	/* the index matched but the full key must match too */
	if (rkey != mw->ibmw.rkey) {
		ret = -EINVAL;
		goto err_drop_ref;
	}

	/* lock serializes invalidate against concurrent bind/invalidate */
	spin_lock_irqsave(&mw->lock, flags);

	ret = rxe_check_invalidate_mw(qp, mw);
	if (ret)
		goto err_unlock;

	rxe_do_invalidate_mw(mw);
err_unlock:
	spin_unlock_irqrestore(&mw->lock, flags);
err_drop_ref:
	rxe_drop_ref(mw);	/* pairs with rxe_pool_get_index() */
err:
	return ret;
}

void rxe_mw_cleanup(struct rxe_pool_entry *elem)
{
	struct rxe_mw *mw = container_of(elem, typeof(*mw), pelem);
+8 −10
Original line number Diff line number Diff line
@@ -487,7 +487,7 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		} else {
			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), paylen,
					from_mr_obj,
					RXE_FROM_MR_OBJ,
					&crc);
			if (err)
				return err;
@@ -581,27 +581,25 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	u8 opcode = wqe->wr.opcode;
	struct rxe_dev *rxe;
	struct rxe_mr *mr;
	u32 rkey;
	int ret;

	switch (opcode) {
	case IB_WR_LOCAL_INV:
		rxe = to_rdev(qp->ibqp.device);
		rkey = wqe->wr.ex.invalidate_rkey;
		mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
		if (!mr) {
			pr_err("No MR for rkey %#x\n", rkey);
		if (rkey_is_mw(rkey))
			ret = rxe_invalidate_mw(qp, rkey);
		else
			ret = rxe_invalidate_mr(qp, rkey);

		if (unlikely(ret)) {
			wqe->status = IB_WC_LOC_QP_OP_ERR;
			return -EINVAL;
			return ret;
		}
		mr->state = RXE_MR_STATE_FREE;
		rxe_drop_ref(mr);
		break;
	case IB_WR_REG_MR:
		mr = to_rmr(wqe->wr.wr.reg.mr);

		rxe_add_ref(mr);
		mr->state = RXE_MR_STATE_VALID;
		mr->access = wqe->wr.wr.reg.access;
Loading