Commit 325a7eb8 authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Cleanup page variables in rxe_mr.c

Clean up the usage of mr->page_shift and mr->page_mask and introduce
an extractor for mr->ibmr.page_size. Normal usage in the kernel has
page_mask mask out the offset within the page rather than the page
number; the rxe driver had reversed that, which was confusing.
Implicitly there can be a per-MR page_size, which was not uniformly
supported.
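
For illustration, a minimal user-space sketch (not part of the patch;
the 4096 page size and the iova value are made up) of the convention the
driver now follows: page_mask clears the in-page offset, while
page_size - 1 extracts it. The old rxe code defined page_mask the other
way around.

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
  	uint64_t page_size = 4096;		/* stand-in for mr->ibmr.page_size */
  	uint64_t page_mask = ~(page_size - 1);	/* kernel style: clears the in-page offset */
  	uint64_t iova = 0x12345678;

  	/* page-aligned part of the address */
  	printf("page base:   0x%llx\n", (unsigned long long)(iova & page_mask));
  	/* offset within the page (what the old rxe page_mask selected) */
  	printf("page offset: 0x%llx\n", (unsigned long long)(iova & (page_size - 1)));
  	return 0;
  }

With 0x12345678 and a 4 KiB page, the first line prints 0x12345000 and
the second 0x678.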

Link: https://lore.kernel.org/r/20230119235936.19728-6-rpearsonhpe@gmail.com


Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent d8bdb0eb
drivers/infiniband/sw/rxe/rxe_mr.c +14 −17
@@ -62,6 +62,9 @@ static void rxe_mr_init(int access, struct rxe_mr *mr)
 	mr->lkey = mr->ibmr.lkey = lkey;
 	mr->rkey = mr->ibmr.rkey = rkey;

+	mr->ibmr.page_size = PAGE_SIZE;
+	mr->page_mask = PAGE_MASK;
+	mr->page_shift = PAGE_SHIFT;
 	mr->state = RXE_MR_STATE_INVALID;
 }

@@ -151,9 +154,6 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
 		goto err_release_umem;
 	}

-	mr->page_shift = PAGE_SHIFT;
-	mr->page_mask = PAGE_SIZE - 1;
-
 	num_buf			= 0;
 	map = mr->map;
 	if (length > 0) {
@@ -182,7 +182,7 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
 				goto err_release_umem;
 			}
 			buf->addr = (uintptr_t)vaddr;
-			buf->size = PAGE_SIZE;
+			buf->size = mr_page_size(mr);
 			num_buf++;
 			buf++;

@@ -191,10 +191,9 @@ int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,

 	mr->umem = umem;
 	mr->access = access;
-	mr->offset = ib_umem_offset(umem);
+	mr->page_offset = ib_umem_offset(umem);
 	mr->state = RXE_MR_STATE_VALID;
 	mr->ibmr.type = IB_MR_TYPE_USER;
-	mr->ibmr.page_size = PAGE_SIZE;

 	return 0;

@@ -248,29 +247,27 @@ int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 		  int sg_nents, unsigned int *sg_offset)
 {
 	struct rxe_mr *mr = to_rmr(ibmr);
-	int n;
-
-	mr->nbuf = 0;
+	unsigned int page_size = mr_page_size(mr);

-	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
+	mr->page_shift = ilog2(page_size);
+	mr->page_mask = ~((u64)page_size - 1);
+	mr->page_offset = ibmr->iova & (page_size - 1);

-	mr->page_shift = ilog2(ibmr->page_size);
-	mr->page_mask = ibmr->page_size - 1;
-	mr->offset = ibmr->iova & mr->page_mask;
+	mr->nbuf = 0;

-	return n;
+	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
 }

 static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
 			size_t *offset_out)
 {
-	size_t offset = iova - mr->ibmr.iova + mr->offset;
+	size_t offset = iova - mr->ibmr.iova + mr->page_offset;
 	int			map_index;
 	int			buf_index;
 	u64			length;

 	if (likely(mr->page_shift)) {
-		*offset_out = offset & mr->page_mask;
+		*offset_out = offset & (mr_page_size(mr) - 1);
 		offset >>= mr->page_shift;
 		*n_out = offset & mr->map_mask;
 		*m_out = offset >> mr->map_shift;
@@ -329,7 +326,7 @@ int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, int length)
 	if (mr->ibmr.type == IB_MR_TYPE_DMA)
 		return -EFAULT;

-	offset = (iova - mr->ibmr.iova + mr->offset) & mr->page_mask;
+	offset = (iova - mr->ibmr.iova + mr->page_offset) & mr->page_mask;
 	while (length > 0) {
 		u8 *va;
 		int bytes;
drivers/infiniband/sw/rxe/rxe_verbs.h +8 −3
@@ -310,11 +310,11 @@ struct rxe_mr {
 	u32			lkey;
 	u32			rkey;
 	enum rxe_mr_state	state;
-	u32			offset;
 	int			access;

-	int			page_shift;
-	int			page_mask;
+	unsigned int		page_offset;
+	unsigned int		page_shift;
+	u64			page_mask;
 	int			map_shift;
 	int			map_mask;

@@ -329,6 +329,11 @@ struct rxe_mr {
 	struct rxe_map		**map;
 };

+static inline unsigned int mr_page_size(struct rxe_mr *mr)
+{
+	return mr ? mr->ibmr.page_size : PAGE_SIZE;
+}
+
 enum rxe_mw_state {
 	RXE_MW_STATE_INVALID	= RXE_MR_STATE_INVALID,
 	RXE_MW_STATE_FREE	= RXE_MR_STATE_FREE,
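
As a reference for how the reworked fields cooperate once
rxe_map_mr_sg() has filled them in, here is a simplified user-space
sketch (not driver code; the struct, names and values are illustrative
stand-ins for the rxe_mr bookkeeping). The real driver goes on to split
the resulting page index into map and buffer indices using map_shift
and map_mask.

  #include <stdio.h>
  #include <stdint.h>

  struct demo_mr {
  	uint64_t iova;			/* stand-in for mr->ibmr.iova */
  	unsigned int page_size;		/* stand-in for mr->ibmr.page_size */
  	unsigned int page_shift;	/* ilog2(page_size) */
  	uint64_t page_mask;		/* ~((u64)page_size - 1) */
  	unsigned int page_offset;	/* start of the MR within its first page */
  };

  int main(void)
  {
  	struct demo_mr mr = { .iova = 0x10000200, .page_size = 4096 };
  	uint64_t iova = 0x10001234;	/* address to resolve inside the MR */
  	uint64_t off, page_index, in_page;

  	/* what rxe_map_mr_sg() now derives from the per-MR page size */
  	mr.page_shift = 12;		/* ilog2(4096) */
  	mr.page_mask = ~((uint64_t)mr.page_size - 1);
  	mr.page_offset = mr.iova & (mr.page_size - 1);

  	/* the core of what lookup_iova() computes with those fields */
  	off = iova - mr.iova + mr.page_offset;
  	in_page = off & (mr.page_size - 1);
  	page_index = off >> mr.page_shift;

  	printf("page index %llu, offset in page 0x%llx\n",
  	       (unsigned long long)page_index, (unsigned long long)in_page);
  	return 0;
  }

With these values the lookup lands in page 1 of the mapping at offset
0x234.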