Commit 2f25e3ba authored by Zhu Yanjun, committed by Leon Romanovsky

RDMA/irdma: Split CQ handler into irdma_reg_user_mr_type_cq

Split the source code related to CQ handling out of irdma_reg_user_mr()
into a new function, irdma_reg_user_mr_type_cq().
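
After this patch and its siblings in the series (irdma_reg_user_mr_type_mem,
irdma_reg_user_mr_type_qp from the parent commit), irdma_reg_user_mr()
reduces to a per-type dispatch. A condensed sketch of that structure,
simplified from the diff below (the real code checks err inside each case):

	switch (req.reg_type) {
	case IRDMA_MEMREG_TYPE_QP:
		err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
		break;
	case IRDMA_MEMREG_TYPE_CQ:
		err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
		break;
	case IRDMA_MEMREG_TYPE_MEM:
		err = irdma_reg_user_mr_type_mem(iwmr, access);
		break;
	default:
		err = -EINVAL;
	}
	if (err)
		goto error;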

Reviewed-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Link: https://lore.kernel.org/r/20230116193502.66540-5-yanjun.zhu@intel.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent e965ef0e
+40 −29
@@ -2867,6 +2867,40 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
 	return 0;
 }
 
+static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
+				     struct ib_udata *udata,
+				     struct irdma_mr *iwmr)
+{
+	struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+	struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+	struct irdma_ucontext *ucontext = NULL;
+	u8 shadow_pgcnt = 1;
+	unsigned long flags;
+	bool use_pbles;
+	u32 total;
+	int err;
+
+	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
+		shadow_pgcnt = 0;
+	total = req.cq_pages + shadow_pgcnt;
+	if (total > iwmr->page_cnt)
+		return -EINVAL;
+
+	use_pbles = req.cq_pages > 1;
+	err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+	if (err)
+		return err;
+
+	ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+					     ibucontext);
+	spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+	list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+	iwpbl->on_list = true;
+	spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+
+	return 0;
+}
+
 /**
  * irdma_reg_user_mr - Register a user memory region
  * @pd: ptr of pd
@@ -2882,16 +2916,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 {
 #define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
 	struct irdma_device *iwdev = to_iwdev(pd->device);
-	struct irdma_ucontext *ucontext;
-	struct irdma_pbl *iwpbl;
-	struct irdma_mr *iwmr;
-	struct ib_umem *region;
-	struct irdma_mem_reg_req req;
-	u32 total;
-	u8 shadow_pgcnt = 1;
-	bool use_pbles = false;
-	unsigned long flags;
-	int err = -EINVAL;
+	struct irdma_mem_reg_req req = {};
+	struct ib_umem *region = NULL;
+	struct irdma_mr *iwmr = NULL;
+	int err;
 
 	if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
 		return ERR_PTR(-EINVAL);
@@ -2918,8 +2946,6 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 		return (struct ib_mr *)iwmr;
 	}
 
-	iwpbl = &iwmr->iwpbl;
-
 	switch (req.reg_type) {
 	case IRDMA_MEMREG_TYPE_QP:
 		err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
@@ -2928,25 +2954,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
 		break;
 	case IRDMA_MEMREG_TYPE_CQ:
-		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
-			shadow_pgcnt = 0;
-		total = req.cq_pages + shadow_pgcnt;
-		if (total > iwmr->page_cnt) {
-			err = -EINVAL;
-			goto error;
-		}
-
-		use_pbles = (req.cq_pages > 1);
-		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+		err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
 		if (err)
 			goto error;
-
-		ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
-						     ibucontext);
-		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
-		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
-		iwpbl->on_list = true;
-		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
 		break;
 	case IRDMA_MEMREG_TYPE_MEM:
 		err = irdma_reg_user_mr_type_mem(iwmr, access);
@@ -2955,6 +2965,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 
 		break;
 	default:
+		err = -EINVAL;
 		goto error;
 	}
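
The extracted helper keeps the old inline logic unchanged: when the device
advertises IRDMA_FEATURE_CQ_RESIZE no shadow page is reserved, so the
validated total is req.cq_pages + shadow_pgcnt against iwmr->page_cnt, and
the PBL is then published on the ucontext's cq_reg_mem_list under an
IRQ-saving spinlock. A minimal self-contained sketch of that
list-publication pattern (hypothetical my_ucontext/my_pbl names, not irdma
code):

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct my_ucontext {
		struct list_head cq_reg_mem_list;
		spinlock_t cq_reg_mem_list_lock;	/* protects cq_reg_mem_list */
	};

	struct my_pbl {
		struct list_head list;
		bool on_list;
	};

	static void my_publish_pbl(struct my_ucontext *uc, struct my_pbl *pbl)
	{
		unsigned long flags;

		/* irqsave variant: safe regardless of the caller's IRQ state */
		spin_lock_irqsave(&uc->cq_reg_mem_list_lock, flags);
		list_add_tail(&pbl->list, &uc->cq_reg_mem_list);
		pbl->on_list = true;	/* tells teardown a list_del() is needed */
		spin_unlock_irqrestore(&uc->cq_reg_mem_list_lock, flags);
	}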