Commit 364e282c authored by Bob Pearson, committed by Jason Gunthorpe
Browse files

RDMA/rxe: Split MEM into MR and MW

In the original rxe implementation it was intended to use a common object
to represent MRs and MWs but they are different enough to separate these
into two objects.

This allows replacing the "mem" name with "mr" for MRs, which is more
consistent with the naming style of the other objects and less likely to be
confusing. This is a long patch that mostly renames mem to mr where it
makes sense, and adds a new rxe_mw struct.

Link: https://lore.kernel.org/r/20210325212425.2792-1-rpearson@hpe.com


Signed-off-by: Bob Pearson <rpearson@hpe.com>
Acked-by: Zhu Yanjun <zyjzyj2000@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 7410c2d0
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -345,7 +345,7 @@ static inline enum comp_state do_read(struct rxe_qp *qp,

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, payload_addr(pkt),
			payload_size(pkt), to_mem_obj, NULL);
			payload_size(pkt), to_mr_obj, NULL);
	if (ret)
		return COMPST_ERROR;

@@ -365,7 +365,7 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,

	ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
			&wqe->dma, &atomic_orig,
			sizeof(u64), to_mem_obj, NULL);
			sizeof(u64), to_mr_obj, NULL);
	if (ret)
		return COMPST_ERROR;
	else
+13 −16
Original line number Diff line number Diff line
@@ -72,40 +72,37 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

/* rxe_mr.c */
enum copy_direction {
	to_mem_obj,
	from_mem_obj,
	to_mr_obj,
	from_mr_obj,
};

void rxe_mem_init_dma(struct rxe_pd *pd,
		      int access, struct rxe_mem *mem);
void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);

int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access, struct ib_udata *udata,
		      struct rxe_mem *mr);
int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
		     int access, struct ib_udata *udata, struct rxe_mr *mr);

int rxe_mem_init_fast(struct rxe_pd *pd,
		      int max_pages, struct rxe_mem *mem);
int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);

int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
		 int length, enum copy_direction dir, u32 *crcp);
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
		enum copy_direction dir, u32 *crcp);

int copy_data(struct rxe_pd *pd, int access,
	      struct rxe_dma_info *dma, void *addr, int length,
	      enum copy_direction dir, u32 *crcp);

void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);
void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);

enum lookup_type {
	lookup_local,
	lookup_remote,
};

struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
			 enum lookup_type type);

int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);

void rxe_mem_cleanup(struct rxe_pool_entry *arg);
void rxe_mr_cleanup(struct rxe_pool_entry *arg);

int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);

+130 −141
Original line number Diff line number Diff line
@@ -24,16 +24,15 @@ static u8 rxe_get_key(void)
	return key;
}

int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
{
	switch (mem->type) {
	case RXE_MEM_TYPE_DMA:
	switch (mr->type) {
	case RXE_MR_TYPE_DMA:
		return 0;

	case RXE_MEM_TYPE_MR:
		if (iova < mem->iova ||
		    length > mem->length ||
		    iova > mem->iova + mem->length - length)
	case RXE_MR_TYPE_MR:
		if (iova < mr->iova || length > mr->length ||
		    iova > mr->iova + mr->length - length)
			return -EFAULT;
		return 0;

@@ -46,85 +45,83 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
				| IB_ACCESS_REMOTE_WRITE	\
				| IB_ACCESS_REMOTE_ATOMIC)

static void rxe_mem_init(int access, struct rxe_mem *mem)
static void rxe_mr_init(int access, struct rxe_mr *mr)
{
	u32 lkey = mem->pelem.index << 8 | rxe_get_key();
	u32 lkey = mr->pelem.index << 8 | rxe_get_key();
	u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;

	mem->ibmr.lkey		= lkey;
	mem->ibmr.rkey		= rkey;
	mem->state		= RXE_MEM_STATE_INVALID;
	mem->type		= RXE_MEM_TYPE_NONE;
	mem->map_shift		= ilog2(RXE_BUF_PER_MAP);
	mr->ibmr.lkey = lkey;
	mr->ibmr.rkey = rkey;
	mr->state = RXE_MR_STATE_INVALID;
	mr->type = RXE_MR_TYPE_NONE;
	mr->map_shift = ilog2(RXE_BUF_PER_MAP);
}

void rxe_mem_cleanup(struct rxe_pool_entry *arg)
void rxe_mr_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_mem *mem = container_of(arg, typeof(*mem), pelem);
	struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
	int i;

	ib_umem_release(mem->umem);
	ib_umem_release(mr->umem);

	if (mem->map) {
		for (i = 0; i < mem->num_map; i++)
			kfree(mem->map[i]);
	if (mr->map) {
		for (i = 0; i < mr->num_map; i++)
			kfree(mr->map[i]);

		kfree(mem->map);
		kfree(mr->map);
	}
}

static int rxe_mem_alloc(struct rxe_mem *mem, int num_buf)
static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
{
	int i;
	int num_map;
	struct rxe_map **map = mem->map;
	struct rxe_map **map = mr->map;

	num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;

	mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
	if (!mem->map)
	mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
	if (!mr->map)
		goto err1;

	for (i = 0; i < num_map; i++) {
		mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
		if (!mem->map[i])
		mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
		if (!mr->map[i])
			goto err2;
	}

	BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));

	mem->map_shift	= ilog2(RXE_BUF_PER_MAP);
	mem->map_mask	= RXE_BUF_PER_MAP - 1;
	mr->map_shift = ilog2(RXE_BUF_PER_MAP);
	mr->map_mask = RXE_BUF_PER_MAP - 1;

	mem->num_buf = num_buf;
	mem->num_map = num_map;
	mem->max_buf = num_map * RXE_BUF_PER_MAP;
	mr->num_buf = num_buf;
	mr->num_map = num_map;
	mr->max_buf = num_map * RXE_BUF_PER_MAP;

	return 0;

err2:
	for (i--; i >= 0; i--)
		kfree(mem->map[i]);
		kfree(mr->map[i]);

	kfree(mem->map);
	kfree(mr->map);
err1:
	return -ENOMEM;
}

void rxe_mem_init_dma(struct rxe_pd *pd,
		      int access, struct rxe_mem *mem)
void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
{
	rxe_mem_init(access, mem);
	rxe_mr_init(access, mr);

	mem->ibmr.pd		= &pd->ibpd;
	mem->access		= access;
	mem->state		= RXE_MEM_STATE_VALID;
	mem->type		= RXE_MEM_TYPE_DMA;
	mr->ibmr.pd = &pd->ibpd;
	mr->access = access;
	mr->state = RXE_MR_STATE_VALID;
	mr->type = RXE_MR_TYPE_DMA;
}

int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access, struct ib_udata *udata,
		      struct rxe_mem *mem)
int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
		     int access, struct ib_udata *udata, struct rxe_mr *mr)
{
	struct rxe_map		**map;
	struct rxe_phys_buf	*buf = NULL;
@@ -142,23 +139,23 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
		goto err1;
	}

	mem->umem = umem;
	mr->umem = umem;
	num_buf = ib_umem_num_pages(umem);

	rxe_mem_init(access, mem);
	rxe_mr_init(access, mr);

	err = rxe_mem_alloc(mem, num_buf);
	err = rxe_mr_alloc(mr, num_buf);
	if (err) {
		pr_warn("err %d from rxe_mem_alloc\n", err);
		pr_warn("err %d from rxe_mr_alloc\n", err);
		ib_umem_release(umem);
		goto err1;
	}

	mem->page_shift		= PAGE_SHIFT;
	mem->page_mask = PAGE_SIZE - 1;
	mr->page_shift = PAGE_SHIFT;
	mr->page_mask = PAGE_SIZE - 1;

	num_buf			= 0;
	map			= mem->map;
	map = mr->map;
	if (length > 0) {
		buf = map[0]->buf;

@@ -185,15 +182,15 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
		}
	}

	mem->ibmr.pd		= &pd->ibpd;
	mem->umem		= umem;
	mem->access		= access;
	mem->length		= length;
	mem->iova		= iova;
	mem->va			= start;
	mem->offset		= ib_umem_offset(umem);
	mem->state		= RXE_MEM_STATE_VALID;
	mem->type		= RXE_MEM_TYPE_MR;
	mr->ibmr.pd = &pd->ibpd;
	mr->umem = umem;
	mr->access = access;
	mr->length = length;
	mr->iova = iova;
	mr->va = start;
	mr->offset = ib_umem_offset(umem);
	mr->state = RXE_MR_STATE_VALID;
	mr->type = RXE_MR_TYPE_MR;

	return 0;

@@ -201,24 +198,23 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
	return err;
}

int rxe_mem_init_fast(struct rxe_pd *pd,
		      int max_pages, struct rxe_mem *mem)
int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
{
	int err;

	rxe_mem_init(0, mem);
	rxe_mr_init(0, mr);

	/* In fastreg, we also set the rkey */
	mem->ibmr.rkey = mem->ibmr.lkey;
	mr->ibmr.rkey = mr->ibmr.lkey;

	err = rxe_mem_alloc(mem, max_pages);
	err = rxe_mr_alloc(mr, max_pages);
	if (err)
		goto err1;

	mem->ibmr.pd		= &pd->ibpd;
	mem->max_buf		= max_pages;
	mem->state		= RXE_MEM_STATE_FREE;
	mem->type		= RXE_MEM_TYPE_MR;
	mr->ibmr.pd = &pd->ibpd;
	mr->max_buf = max_pages;
	mr->state = RXE_MR_STATE_FREE;
	mr->type = RXE_MR_TYPE_MR;

	return 0;

@@ -226,28 +222,24 @@ int rxe_mem_init_fast(struct rxe_pd *pd,
	return err;
}

static void lookup_iova(
	struct rxe_mem	*mem,
	u64			iova,
	int			*m_out,
	int			*n_out,
static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
			size_t *offset_out)
{
	size_t			offset = iova - mem->iova + mem->offset;
	size_t offset = iova - mr->iova + mr->offset;
	int			map_index;
	int			buf_index;
	u64			length;

	if (likely(mem->page_shift)) {
		*offset_out = offset & mem->page_mask;
		offset >>= mem->page_shift;
		*n_out = offset & mem->map_mask;
		*m_out = offset >> mem->map_shift;
	if (likely(mr->page_shift)) {
		*offset_out = offset & mr->page_mask;
		offset >>= mr->page_shift;
		*n_out = offset & mr->map_mask;
		*m_out = offset >> mr->map_shift;
	} else {
		map_index = 0;
		buf_index = 0;

		length = mem->map[map_index]->buf[buf_index].size;
		length = mr->map[map_index]->buf[buf_index].size;

		while (offset >= length) {
			offset -= length;
@@ -257,7 +249,7 @@ static void lookup_iova(
				map_index++;
				buf_index = 0;
			}
			length = mem->map[map_index]->buf[buf_index].size;
			length = mr->map[map_index]->buf[buf_index].size;
		}

		*m_out = map_index;
@@ -266,48 +258,48 @@ static void lookup_iova(
	}
}

void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
{
	size_t offset;
	int m, n;
	void *addr;

	if (mem->state != RXE_MEM_STATE_VALID) {
		pr_warn("mem not in valid state\n");
	if (mr->state != RXE_MR_STATE_VALID) {
		pr_warn("mr not in valid state\n");
		addr = NULL;
		goto out;
	}

	if (!mem->map) {
	if (!mr->map) {
		addr = (void *)(uintptr_t)iova;
		goto out;
	}

	if (mem_check_range(mem, iova, length)) {
	if (mr_check_range(mr, iova, length)) {
		pr_warn("range violation\n");
		addr = NULL;
		goto out;
	}

	lookup_iova(mem, iova, &m, &n, &offset);
	lookup_iova(mr, iova, &m, &n, &offset);

	if (offset + length > mem->map[m]->buf[n].size) {
	if (offset + length > mr->map[m]->buf[n].size) {
		pr_warn("crosses page boundary\n");
		addr = NULL;
		goto out;
	}

	addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset;
	addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;

out:
	return addr;
}

/* copy data from a range (vaddr, vaddr+length-1) to or from
 * a mem object starting at iova. Compute incremental value of
 * crc32 if crcp is not zero. caller must hold a reference to mem
 * a mr object starting at iova. Compute incremental value of
 * crc32 if crcp is not zero. caller must hold a reference to mr
 */
int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
		enum copy_direction dir, u32 *crcp)
{
	int			err;
@@ -323,43 +315,41 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
	if (length == 0)
		return 0;

	if (mem->type == RXE_MEM_TYPE_DMA) {
	if (mr->type == RXE_MR_TYPE_DMA) {
		u8 *src, *dest;

		src  = (dir == to_mem_obj) ?
			addr : ((void *)(uintptr_t)iova);
		src = (dir == to_mr_obj) ? addr : ((void *)(uintptr_t)iova);

		dest = (dir == to_mem_obj) ?
			((void *)(uintptr_t)iova) : addr;
		dest = (dir == to_mr_obj) ? ((void *)(uintptr_t)iova) : addr;

		memcpy(dest, src, length);

		if (crcp)
			*crcp = rxe_crc32(to_rdev(mem->ibmr.device),
					*crcp, dest, length);
			*crcp = rxe_crc32(to_rdev(mr->ibmr.device), *crcp, dest,
					  length);

		return 0;
	}

	WARN_ON_ONCE(!mem->map);
	WARN_ON_ONCE(!mr->map);

	err = mem_check_range(mem, iova, length);
	err = mr_check_range(mr, iova, length);
	if (err) {
		err = -EFAULT;
		goto err1;
	}

	lookup_iova(mem, iova, &m, &i, &offset);
	lookup_iova(mr, iova, &m, &i, &offset);

	map	= mem->map + m;
	map = mr->map + m;
	buf	= map[0]->buf + i;

	while (length > 0) {
		u8 *src, *dest;

		va	= (u8 *)(uintptr_t)buf->addr + offset;
		src  = (dir == to_mem_obj) ? addr : va;
		dest = (dir == to_mem_obj) ? va : addr;
		src = (dir == to_mr_obj) ? addr : va;
		dest = (dir == to_mr_obj) ? va : addr;

		bytes	= buf->size - offset;

@@ -369,8 +359,8 @@ int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
		memcpy(dest, src, bytes);

		if (crcp)
			crc = rxe_crc32(to_rdev(mem->ibmr.device),
					crc, dest, bytes);
			crc = rxe_crc32(to_rdev(mr->ibmr.device), crc, dest,
					bytes);

		length	-= bytes;
		addr	+= bytes;
@@ -411,7 +401,7 @@ int copy_data(
	struct rxe_sge		*sge	= &dma->sge[dma->cur_sge];
	int			offset	= dma->sge_offset;
	int			resid	= dma->resid;
	struct rxe_mem		*mem	= NULL;
	struct rxe_mr		*mr	= NULL;
	u64			iova;
	int			err;

@@ -424,8 +414,8 @@ int copy_data(
	}

	if (sge->length && (offset < sge->length)) {
		mem = lookup_mem(pd, access, sge->lkey, lookup_local);
		if (!mem) {
		mr = lookup_mr(pd, access, sge->lkey, lookup_local);
		if (!mr) {
			err = -EINVAL;
			goto err1;
		}
@@ -435,9 +425,9 @@ int copy_data(
		bytes = length;

		if (offset >= sge->length) {
			if (mem) {
				rxe_drop_ref(mem);
				mem = NULL;
			if (mr) {
				rxe_drop_ref(mr);
				mr = NULL;
			}
			sge++;
			dma->cur_sge++;
@@ -449,9 +439,9 @@ int copy_data(
			}

			if (sge->length) {
				mem = lookup_mem(pd, access, sge->lkey,
				mr = lookup_mr(pd, access, sge->lkey,
					       lookup_local);
				if (!mem) {
				if (!mr) {
					err = -EINVAL;
					goto err1;
				}
@@ -466,7 +456,7 @@ int copy_data(
		if (bytes > 0) {
			iova = sge->addr + offset;

			err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
			err = rxe_mr_copy(mr, iova, addr, bytes, dir, crcp);
			if (err)
				goto err2;

@@ -480,14 +470,14 @@ int copy_data(
	dma->sge_offset = offset;
	dma->resid	= resid;

	if (mem)
		rxe_drop_ref(mem);
	if (mr)
		rxe_drop_ref(mr);

	return 0;

err2:
	if (mem)
		rxe_drop_ref(mem);
	if (mr)
		rxe_drop_ref(mr);
err1:
	return err;
}
@@ -525,31 +515,30 @@ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
	return 0;
}

/* (1) find the mem (mr or mw) corresponding to lkey/rkey
/* (1) find the mr corresponding to lkey/rkey
 *     depending on lookup_type
 * (2) verify that the (qp) pd matches the mem pd
 * (3) verify that the mem can support the requested access
 * (4) verify that mem state is valid
 * (2) verify that the (qp) pd matches the mr pd
 * (3) verify that the mr can support the requested access
 * (4) verify that mr state is valid
 */
struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
			 enum lookup_type type)
{
	struct rxe_mem *mem;
	struct rxe_mr *mr;
	struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
	int index = key >> 8;

	mem = rxe_pool_get_index(&rxe->mr_pool, index);
	if (!mem)
	mr = rxe_pool_get_index(&rxe->mr_pool, index);
	if (!mr)
		return NULL;

	if (unlikely((type == lookup_local && mr_lkey(mem) != key) ||
		     (type == lookup_remote && mr_rkey(mem) != key) ||
		     mr_pd(mem) != pd ||
		     (access && !(access & mem->access)) ||
		     mem->state != RXE_MEM_STATE_VALID)) {
		rxe_drop_ref(mem);
		mem = NULL;
	if (unlikely((type == lookup_local && mr_lkey(mr) != key) ||
		     (type == lookup_remote && mr_rkey(mr) != key) ||
		     mr_pd(mr) != pd || (access && !(access & mr->access)) ||
		     mr->state != RXE_MR_STATE_VALID)) {
		rxe_drop_ref(mr);
		mr = NULL;
	}

	return mem;
	return mr;
}
+6 −8
Original line number Diff line number Diff line
@@ -8,8 +8,6 @@
#include "rxe_loc.h"

/* info about object pools
 * note that mr and mw share a single index space
 * so that one can map an lkey to the correct type of object
 */
struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
@@ -56,18 +54,18 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
	},
	[RXE_TYPE_MR] = {
		.name		= "rxe-mr",
		.size		= sizeof(struct rxe_mem),
		.elem_offset	= offsetof(struct rxe_mem, pelem),
		.cleanup	= rxe_mem_cleanup,
		.size		= sizeof(struct rxe_mr),
		.elem_offset	= offsetof(struct rxe_mr, pelem),
		.cleanup	= rxe_mr_cleanup,
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MR_INDEX,
		.min_index	= RXE_MIN_MR_INDEX,
	},
	[RXE_TYPE_MW] = {
		.name		= "rxe-mw",
		.size		= sizeof(struct rxe_mem),
		.elem_offset	= offsetof(struct rxe_mem, pelem),
		.flags		= RXE_POOL_INDEX,
		.size		= sizeof(struct rxe_mw),
		.elem_offset	= offsetof(struct rxe_mw, pelem),
		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.max_index	= RXE_MAX_MW_INDEX,
		.min_index	= RXE_MIN_MW_INDEX,
	},
+5 −5
Original line number Diff line number Diff line
@@ -464,7 +464,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
		} else {
			err = copy_data(qp->pd, 0, &wqe->dma,
					payload_addr(pkt), paylen,
					from_mem_obj,
					from_mr_obj,
					&crc);
			if (err)
				return err;
@@ -596,7 +596,7 @@ int rxe_requester(void *arg)
	if (wqe->mask & WR_REG_MASK) {
		if (wqe->wr.opcode == IB_WR_LOCAL_INV) {
			struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
			struct rxe_mem *rmr;
			struct rxe_mr *rmr;

			rmr = rxe_pool_get_index(&rxe->mr_pool,
						 wqe->wr.ex.invalidate_rkey >> 8);
@@ -607,14 +607,14 @@ int rxe_requester(void *arg)
				wqe->status = IB_WC_MW_BIND_ERR;
				goto exit;
			}
			rmr->state = RXE_MEM_STATE_FREE;
			rmr->state = RXE_MR_STATE_FREE;
			rxe_drop_ref(rmr);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
		} else if (wqe->wr.opcode == IB_WR_REG_MR) {
			struct rxe_mem *rmr = to_rmr(wqe->wr.wr.reg.mr);
			struct rxe_mr *rmr = to_rmr(wqe->wr.wr.reg.mr);

			rmr->state = RXE_MEM_STATE_VALID;
			rmr->state = RXE_MR_STATE_VALID;
			rmr->access = wqe->wr.wr.reg.access;
			rmr->ibmr.lkey = wqe->wr.wr.reg.key;
			rmr->ibmr.rkey = wqe->wr.wr.reg.key;
Loading