Unverified Commit 412ff3a1 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!3978 Bugfixes for RDMA/hns

Merge Pull Request from: @stinft 
 
Upload six patches to fix bugs.
Chengchang Tang (2):
   RDMA/hns: Rename the interrupts
   RDMA/hns: Fix printing level of asynchronous events
 Junxian Huang (2):
   RDMA/hns: Remove unnecessary QP type checks
   RDMA/hns: Add a max length of gid table
 Luoyouming (2):
   RDMA/hns: Fix a missing validation check for sl
   RDMA/hns: Fix congestion control algorithm type for UD

https://gitee.com/openeuler/kernel/issues/I8W3IU 
 
Link: https://gitee.com/openeuler/kernel/pulls/3978

 

Reviewed-by: default avatarChengchang Tang <tangchengchang@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents fdf4c214 8c8000ce
Loading
Loading
Loading
Loading
+9 −0
Original line number Diff line number Diff line
@@ -62,6 +62,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
	struct hns_roce_ah *ah = to_hr_ah(ibah);
	u8 priority = 0;
	u8 tc_mode = 0;
	u32 sl_num;
	int ret;

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
@@ -92,6 +93,14 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
	else
		ah->av.sl = rdma_ah_get_sl(ah_attr);

	sl_num = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
	if (unlikely(ah->av.sl > sl_num)) {
		ibdev_err_ratelimited(&hr_dev->ib_dev,
			  "failed to set sl, sl (%u) shouldn't be larger than %u.\n",
			  ah->av.sl, sl_num);
		return -EINVAL;
	}

	memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
	memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);

+26 −31
Original line number Diff line number Diff line
@@ -447,18 +447,12 @@ static int check_send_valid(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct ib_qp *ibqp = &hr_qp->ibqp;
	int ret;

	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
		     ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_UD)) {
		ibdev_err(ibdev, "Not supported QP(0x%x)type!\n",
			  ibqp->qp_type);
		return -EOPNOTSUPP;
	} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
	if (unlikely(hr_qp->state == IB_QPS_RESET ||
		     hr_qp->state == IB_QPS_INIT ||
		     hr_qp->state == IB_QPS_RTR)) {

		ibdev_err(ibdev, "failed to post WQE, QP state %u!\n",
			  hr_qp->state);
		return -EINVAL;
@@ -879,17 +873,8 @@ static int check_recv_valid(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct ib_qp *ibqp = &hr_qp->ibqp;
	int ret;

	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
		     ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_UD)) {
		ibdev_err(ibdev, "unsupported qp type, qp_type = %d.\n",
			  ibqp->qp_type);
		return -EOPNOTSUPP;
	}

	if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
		return -EIO;

@@ -2245,6 +2230,7 @@ static void set_hem_page_size(struct hns_roce_dev *hr_dev)
/* Apply all loaded caps before setting to hardware */
static void apply_func_caps(struct hns_roce_dev *hr_dev)
{
#define MAX_GID_TBL_LEN 256
	struct hns_roce_caps *caps = &hr_dev->caps;
	struct hns_roce_v2_priv *priv = hr_dev->priv;

@@ -2279,8 +2265,14 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
		caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ;

		caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
		caps->gid_table_len[0] = caps->gmv_bt_num *
					(HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);

		/* It's meaningless to support excessively large gid_table_len,
		 * as the type of sgid_index in kernel struct ib_global_route
		 * and userspace struct ibv_global_route are u8/uint8_t (0-255).
		 */
		caps->gid_table_len[0] = min_t(u32, MAX_GID_TBL_LEN,
					 caps->gmv_bt_num *
					 (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz));

		caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
							  caps->gmv_entry_sz);
@@ -5155,6 +5147,7 @@ static int hns_roce_set_sl(struct ib_qp *ibqp,
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 sl_num;
	int ret;

	ret = hns_roce_hw_v2_get_dscp(hr_dev, get_tclass(&attr->ah_attr.grh),
@@ -5171,10 +5164,11 @@ static int hns_roce_set_sl(struct ib_qp *ibqp,
	else
		hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);

	if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
		ibdev_err(ibdev,
			  "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n",
			  hr_qp->sl, MAX_SERVICE_LEVEL);
	sl_num = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
	if (unlikely(hr_qp->sl > sl_num)) {
		ibdev_err_ratelimited(ibdev,
			  "failed to fill QPC, sl (%u) shouldn't be larger than %u.\n",
			  hr_qp->sl, sl_num);
		return -EINVAL;
	}

@@ -6250,7 +6244,7 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
	case HNS_ROCE_EVENT_TYPE_COMM_EST:
		break;
	case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
		ibdev_warn(ibdev, "Send queue drained.\n");
		ibdev_dbg(ibdev, "send queue drained.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		ibdev_err(ibdev, "Local work queue 0x%x catast error, sub_event type is: %d\n",
@@ -6265,10 +6259,10 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
			  irq_work->queue_num, irq_work->sub_type);
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		ibdev_warn(ibdev, "SRQ limit reach.\n");
		ibdev_dbg(ibdev, "SRQ limit reach.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
		ibdev_warn(ibdev, "SRQ last wqe reach.\n");
		ibdev_dbg(ibdev, "SRQ last wqe reach.\n");
		break;
	case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		ibdev_err(ibdev, "SRQ catas error.\n");
@@ -6857,15 +6851,16 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
	/* irq contains: abnormal + AEQ + CEQ */
	for (j = 0; j < other_num; j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-abn-%d", j);
			 "hns-%s-abn-%d", pci_name(hr_dev->pci_dev), j);

	for (j = other_num; j < (other_num + aeq_num); j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-aeq-%d", j - other_num);
			 "hns-%s-aeq-%d", pci_name(hr_dev->pci_dev), j - other_num);

	for (j = (other_num + aeq_num); j < irq_num; j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-ceq-%d", j - other_num - aeq_num);
			 "hns-%s-ceq-%d", pci_name(hr_dev->pci_dev),
			 j - other_num - aeq_num);

	for (j = 0; j < irq_num; j++) {
		if (j < other_num)
+24 −9
Original line number Diff line number Diff line
@@ -1096,6 +1096,9 @@ static inline void default_congest_type(struct hns_roce_dev *hr_dev,
{
	struct hns_roce_caps *caps = &hr_dev->caps;

	if (hr_qp->ibqp.qp_type == IB_QPT_UD)
		hr_qp->congest_type = HNS_ROCE_CREATE_QP_FLAGS_DCQCN;
	else
		hr_qp->congest_type = 1 << caps->default_congest_type;
}

@@ -1104,6 +1107,17 @@ static int set_congest_type(struct hns_roce_qp *hr_qp,
{
	int ret = 0;

	if (hr_qp->ibqp.qp_type == IB_QPT_UD &&
	    !(ucmd->congest_type_flags & HNS_ROCE_CREATE_QP_FLAGS_DCQCN)) {
		struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);

		ibdev_err_ratelimited(&hr_dev->ib_dev,
			"UD just support DCQCN. unsupported congest type 0x%llx.\n",
			ucmd->congest_type_flags);

		return -EINVAL;
	}

	if (ucmd->congest_type_flags & HNS_ROCE_CREATE_QP_FLAGS_DCQCN)
		hr_qp->congest_type = HNS_ROCE_CREATE_QP_FLAGS_DCQCN;
	else if (ucmd->congest_type_flags & HNS_ROCE_CREATE_QP_FLAGS_LDCP)
@@ -1118,19 +1132,17 @@ static int set_congest_type(struct hns_roce_qp *hr_qp,
	return ret;
}

static void set_congest_param(struct hns_roce_dev *hr_dev,
static int set_congest_param(struct hns_roce_dev *hr_dev,
			      struct hns_roce_qp *hr_qp,
			      struct hns_roce_ib_create_qp *ucmd)
{
	int ret;
	if (ucmd->comp_mask & HNS_ROCE_CREATE_QP_MASK_CONGEST_TYPE)
		return set_congest_type(hr_qp, ucmd);

	if (ucmd->comp_mask & HNS_ROCE_CREATE_QP_MASK_CONGEST_TYPE) {
		ret = set_congest_type(hr_qp, ucmd);
		if (ret == 0)
			return;
	}

	default_congest_type(hr_dev, hr_qp);

	return 0;
}

static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
@@ -1177,7 +1189,10 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		if (ret)
			ibdev_err(ibdev, "Failed to set user SQ size, ret = %d\n",
				  ret);
		set_congest_param(hr_dev, hr_qp, ucmd);

		ret = set_congest_param(hr_dev, hr_qp, ucmd);
		if (ret)
			return ret;
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {