Commit ca85855b authored by Linus Torvalds
Pull rdma fixes from Jason Gunthorpe:
 "Two bug fixes for irdma:

   - x722 does not support 1GB pages; trying to configure them will
     corrupt the DMA mapping

   - Fix a sleep while holding a spinlock"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/irdma: Fix sleep from invalid context BUG
  RDMA/irdma: Do not advertise 1GB page size for x722
parents 80e19f34 cc031556
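
Before the diffs, a note on the second listed fix. Taking a mutex, or calling anything else that can sleep, while a spinlock is held trips the kernel's atomic-context check and logs "BUG: sleeping function called from invalid context". Below is a minimal sketch of the bug class and the usual deferral cure, with hypothetical names; this is not the irdma call chain, and the actual patch below resolves it by deleting the offending path instead:

#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(table_lock);
static DEFINE_MUTEX(cfg_mutex);

/* Broken: table_lock makes this context atomic, but mutex_lock() may sleep. */
static void teardown_broken(struct list_head *table)
{
	spin_lock(&table_lock);
	mutex_lock(&cfg_mutex);		/* BUG: sleeping while atomic */
	/* ... walk and tear down entries ... */
	mutex_unlock(&cfg_mutex);
	spin_unlock(&table_lock);
}

/* Fixed: collect work under the spinlock, sleep only after dropping it. */
static void teardown_fixed(struct list_head *table)
{
	LIST_HEAD(pending);

	spin_lock(&table_lock);
	list_splice_init(table, &pending);	/* non-sleeping under the lock */
	spin_unlock(&table_lock);

	mutex_lock(&cfg_mutex);			/* safe: no spinlock held now */
	/* ... process the pending entries ... */
	mutex_unlock(&cfg_mutex);
}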
+0 −50
@@ -4231,10 +4231,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
 	struct irdma_cm_node *cm_node;
 	struct list_head teardown_list;
 	struct ib_qp_attr attr;
-	struct irdma_sc_vsi *vsi = &iwdev->vsi;
-	struct irdma_sc_qp *sc_qp;
-	struct irdma_qp *qp;
-	int i;
 
 	INIT_LIST_HEAD(&teardown_list);
 
@@ -4251,52 +4247,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
 			irdma_cm_disconn(cm_node->iwqp);
 		irdma_rem_ref_cm_node(cm_node);
 	}
-	if (!iwdev->roce_mode)
-		return;
-
-	INIT_LIST_HEAD(&teardown_list);
-	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
-		mutex_lock(&vsi->qos[i].qos_mutex);
-		list_for_each_safe (list_node, list_core_temp,
-				    &vsi->qos[i].qplist) {
-			u32 qp_ip[4];
-
-			sc_qp = container_of(list_node, struct irdma_sc_qp,
-					     list);
-			if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC)
-				continue;
-
-			qp = sc_qp->qp_uk.back_qp;
-			if (!disconnect_all) {
-				if (nfo->ipv4)
-					qp_ip[0] = qp->udp_info.local_ipaddr[3];
-				else
-					memcpy(qp_ip,
-					       &qp->udp_info.local_ipaddr[0],
-					       sizeof(qp_ip));
-			}
-
-			if (disconnect_all ||
-			    (nfo->vlan_id == (qp->udp_info.vlan_tag & VLAN_VID_MASK) &&
-			     !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) {
-				spin_lock(&iwdev->rf->qptable_lock);
-				if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
-					irdma_qp_add_ref(&qp->ibqp);
-					list_add(&qp->teardown_entry,
-						 &teardown_list);
-				}
-				spin_unlock(&iwdev->rf->qptable_lock);
-			}
-		}
-		mutex_unlock(&vsi->qos[i].qos_mutex);
-	}
-
-	list_for_each_safe (list_node, list_core_temp, &teardown_list) {
-		qp = container_of(list_node, struct irdma_qp, teardown_entry);
-		attr.qp_state = IB_QPS_ERR;
-		irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
-		irdma_qp_rem_ref(&qp->ibqp);
-	}
 }
 
 /**
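
Note that the block removed above already deferred irdma_modify_qp_roce() to a second pass via teardown_list, but it still takes mutex_lock(&vsi->qos[i].qos_mutex) inside the loop; if irdma_cm_teardown_connections() is entered from atomic context, that mutex acquisition is a sleep from invalid context. That is presumably the BUG the first patch title refers to: deleting the RoCE branch removes every sleeping call from this teardown path.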
+1 −0
@@ -201,6 +201,7 @@ void i40iw_init_hw(struct irdma_sc_dev *dev)
 	dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD;
 	dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT;
 	dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE;
+	dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M;
 	dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE;
 	dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE;
 	dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES;
+1 −0
@@ -139,6 +139,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev)
 	dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
 	dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
 	dev->irq_ops = &icrdma_irq_ops;
+	dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
 	dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
 	dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
 	dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
+1 −0
@@ -127,6 +127,7 @@ struct irdma_hw_attrs {
 	u64 max_hw_outbound_msg_size;
 	u64 max_hw_inbound_msg_size;
 	u64 max_mr_size;
+	u64 page_size_cap;
 	u32 min_hw_qp_id;
 	u32 min_hw_aeq_size;
 	u32 max_hw_aeq_size;
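
Taken together, the three hunks above give each hardware generation its own MR page-size capability mask: the x722 (i40iw) init advertises SZ_4K | SZ_2M, gen2 (icrdma) additionally sets SZ_1G, and struct irdma_hw_attrs gains the page_size_cap field that carries the mask. Here is a toy user-space sketch of how such a bitmask constrains best-page-size selection; it is illustrative only, since the kernel's ib_umem_find_best_pgsz() also folds in the virtual address and the umem layout:

#include <stdint.h>
#include <stdio.h>

/*
 * Pick the largest page size from a capability mask that both the
 * buffer's start address and length are aligned to.
 */
static uint64_t best_pgsz(uint64_t pgsz_cap, uint64_t addr, uint64_t len)
{
	/* Lowest set bit of addr|len is the effective alignment. */
	uint64_t align = (addr | len) & -(addr | len);

	pgsz_cap &= (align << 1) - 1;	/* drop page sizes above the alignment */
	if (!pgsz_cap)
		return 0;
	/* Clear lower bits until only the highest (largest size) remains. */
	while (pgsz_cap & (pgsz_cap - 1))
		pgsz_cap &= pgsz_cap - 1;
	return pgsz_cap;
}

int main(void)
{
	uint64_t x722_cap = 0x1000 | 0x200000;		/* SZ_4K | SZ_2M */
	uint64_t gen2_cap = x722_cap | 0x40000000;	/* ...  | SZ_1G  */

	/* A 1GiB-aligned, 1GiB-long region: 2M on x722, 1G on gen2. */
	printf("%#llx\n", (unsigned long long)best_pgsz(x722_cap, 0x40000000, 0x40000000));
	printf("%#llx\n", (unsigned long long)best_pgsz(gen2_cap, 0x40000000, 0x40000000));
	return 0;
}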
+2 −2
@@ -32,7 +32,7 @@ static int irdma_query_device(struct ib_device *ibdev,
 	props->vendor_part_id = pcidev->device;
 
 	props->hw_ver = rf->pcidev->revision;
-	props->page_size_cap = SZ_4K | SZ_2M | SZ_1G;
+	props->page_size_cap = hw_attrs->page_size_cap;
 	props->max_mr_size = hw_attrs->max_mr_size;
 	props->max_qp = rf->max_qp - rf->used_qps;
 	props->max_qp_wr = hw_attrs->max_qp_wr;
@@ -2781,7 +2781,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,

 	if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
 		iwmr->page_size = ib_umem_find_best_pgsz(region,
-							 SZ_4K | SZ_2M | SZ_1G,
+							 iwdev->rf->sc_dev.hw_attrs.page_size_cap,
 							 virt);
 		if (unlikely(!iwmr->page_size)) {
 			kfree(iwmr);
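
With the last two hunks, irdma_query_device() reports the per-device mask instead of a hardcoded SZ_4K | SZ_2M | SZ_1G, and user MR registration passes the same mask to ib_umem_find_best_pgsz(), so a 1GB page can never be selected on x722. A quick libibverbs probe of what a device advertises, as a sketch assuming at least one local RDMA device:

#include <stdio.h>
#include <infiniband/verbs.h>

/* Print the page-size capability mask of the first local RDMA device. */
int main(void)
{
	struct ibv_device **list = ibv_get_device_list(NULL);
	struct ibv_device_attr attr;
	struct ibv_context *ctx;
	int rc = 1;

	if (!list || !list[0])
		return 1;
	ctx = ibv_open_device(list[0]);
	if (ctx && !ibv_query_device(ctx, &attr)) {
		/* Expect 0x201000 (4K|2M) on x722 after this fix,
		 * 0x40201000 (4K|2M|1G) on gen2 hardware. */
		printf("page_size_cap: %#llx\n",
		       (unsigned long long)attr.page_size_cap);
		rc = 0;
	}
	if (ctx)
		ibv_close_device(ctx);
	ibv_free_device_list(list);
	return rc;
}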