Commit 6e0567b7 authored by Linus Torvalds
Browse files
Pull rdma fixes from Jason Gunthorpe:
 "Last fixes before holidays. Nothing very exciting:

   - Work around a HW bug in HNS HIP08

   - Recent memory leak regression in qib

   - Incorrect use of kfree() for vmalloc memory in hns"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/hns: Replace kfree() with kvfree()
  IB/qib: Fix memory leak in qib_user_sdma_queue_pkts()
  RDMA/hns: Fix RNR retransmission issue for HIP08
parents 86085fe7 12d3bbdd
Loading
Loading
Loading
Loading
+57 −7
Original line number Diff line number Diff line
@@ -1594,11 +1594,17 @@ static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	u32 clock_cycles_of_1us;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
				      false);

	hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, 0x3e8);
	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
		clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
	else
		clock_cycles_of_1us = HNS_ROCE_1US_CFG;

	hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
	hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
@@ -4802,6 +4808,30 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
	return ret;
}

/*
 * Validate the local ACK timeout against the per-revision HW limit and,
 * on HIP08, rebase it by the offset the hardware requires.
 *
 * Returns true when *timeout is usable (possibly adjusted in place);
 * returns false, after warning, when the caller's value is out of range.
 */
static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
{
#define QP_ACK_TIMEOUT_MAX_HIP08 20
#define QP_ACK_TIMEOUT_OFFSET 10
#define QP_ACK_TIMEOUT_MAX 31

	u8 revision = hr_dev->pci_dev->revision;

	if (revision > PCI_REVISION_ID_HIP08) {
		/* Newer revisions accept the full 5-bit range. */
		if (*timeout > QP_ACK_TIMEOUT_MAX) {
			ibdev_warn(&hr_dev->ib_dev,
				   "Local ACK timeout shall be 0 to 31.\n");
			return false;
		}
	} else if (revision == PCI_REVISION_ID_HIP08) {
		/* HIP08 tops out at 20 and needs a fixed HW offset added. */
		if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) {
			ibdev_warn(&hr_dev->ib_dev,
				   "Local ACK timeout shall be 0 to 20.\n");
			return false;
		}
		*timeout += QP_ACK_TIMEOUT_OFFSET;
	}

	return true;
}

static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
				      const struct ib_qp_attr *attr,
				      int attr_mask,
@@ -4811,6 +4841,7 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	int ret = 0;
	u8 timeout;

	if (attr_mask & IB_QP_AV) {
		ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
@@ -4820,12 +4851,10 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		if (attr->timeout < 31) {
			hr_reg_write(context, QPC_AT, attr->timeout);
		timeout = attr->timeout;
		if (check_qp_timeout_cfg_range(hr_dev, &timeout)) {
			hr_reg_write(context, QPC_AT, timeout);
			hr_reg_clear(qpc_mask, QPC_AT);
		} else {
			ibdev_warn(&hr_dev->ib_dev,
				   "Local ACK timeout shall be 0 to 30.\n");
		}
	}

@@ -4882,7 +4911,9 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
		set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		hr_reg_write(context, QPC_MIN_RNR_TIME, attr->min_rnr_timer);
		hr_reg_write(context, QPC_MIN_RNR_TIME,
			    hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ?
			    HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer);
		hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME);
	}

@@ -5499,6 +5530,16 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)

	hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count);
	hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT);

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
			dev_info(hr_dev->dev,
				 "cq_period(%u) reached the upper limit, adjusted to 65.\n",
				 cq_period);
			cq_period = HNS_ROCE_MAX_CQ_PERIOD;
		}
		cq_period *= HNS_ROCE_CLOCK_ADJUST;
	}
	hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period);
	hr_reg_clear(cqc_mask, CQC_CQ_PERIOD);

@@ -5894,6 +5935,15 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
	hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
	hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) {
			dev_info(hr_dev->dev, "eq_period(%u) reached the upper limit, adjusted to 65.\n",
				 eq->eq_period);
			eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD;
		}
		eq->eq_period *= HNS_ROCE_CLOCK_ADJUST;
	}

	hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
	hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
	hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
+8 −0
Original line number Diff line number Diff line
@@ -1444,6 +1444,14 @@ struct hns_roce_dip {
	struct list_head node;	/* all dips are on a list */
};

/* only for RNR timeout issue of HIP08 */
#define HNS_ROCE_CLOCK_ADJUST 1000
#define HNS_ROCE_MAX_CQ_PERIOD 65
#define HNS_ROCE_MAX_EQ_PERIOD 65
#define HNS_ROCE_RNR_TIMER_10NS 1
#define HNS_ROCE_1US_CFG 999
#define HNS_ROCE_1NS_CFG 0

#define HNS_ROCE_AEQ_DEFAULT_BURST_NUM	0x0
#define HNS_ROCE_AEQ_DEFAULT_INTERVAL	0x0
#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM	0x0
+1 −1
Original line number Diff line number Diff line
@@ -259,7 +259,7 @@ static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)

/*
 * Release the SRQ work-request ID array.
 *
 * Per the commit this diff belongs to, srq->wrid can be vmalloc-backed
 * (kvmalloc-style allocation), so it must be released with kvfree(),
 * never kfree().  The stripped diff left both the removed kfree() and
 * the added kvfree() in the body, which as literal C is a double free;
 * only the kvfree() call belongs here.  Clearing the pointer afterwards
 * guards against a later double free.
 */
static void free_srq_wrid(struct hns_roce_srq *srq)
{
	kvfree(srq->wrid);
	srq->wrid = NULL;
}

+1 −1
Original line number Diff line number Diff line
@@ -941,7 +941,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
					       &addrlimit) ||
			    addrlimit > type_max(typeof(pkt->addrlimit))) {
				ret = -EINVAL;
				goto free_pbc;
				goto free_pkt;
			}
			pkt->addrlimit = addrlimit;