Commit f31c32ef authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull rdma fixes from Jason Gunthorpe:
 "A few minor fixes:

   - Fix buffer management in SRP to correct a regression with the login
     authentication feature from v5.17

   - Don't iterate over non-present ports in mlx5

   - Fix an error introduced by the fortify work in cxgb4

   - Two bug fixes for the recently merged ERDMA driver

   - Unbreak RDMA dmabuf support, a regression from v5.19"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA: Handle the return code from dma_resv_wait_timeout() properly
  RDMA/erdma: Correct the max_qp and max_cq capacities of the device
  RDMA/erdma: Using the key in FMR WR instead of MR structure
  RDMA/cxgb4: fix accept failure due to increased cpl_t5_pass_accept_rpl size
  RDMA/mlx5: Use the proper number of ports
  IB/iser: Fix login with authentication
parents b9bce6e5 b16de8b9
Loading
Loading
Loading
Loading
+7 −1
Original line number Diff line number Diff line
@@ -18,6 +18,7 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
	struct scatterlist *sg;
	unsigned long start, end, cur = 0;
	unsigned int nmap = 0;
	long ret;
	int i;

	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
@@ -67,9 +68,14 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
	 * may be not up-to-date. Wait for the exporter to finish
	 * the migration.
	 */
	return dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
	ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
				     DMA_RESV_USAGE_KERNEL,
				     false, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;
	if (ret == 0)
		return -ETIMEDOUT;
	return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);

+9 −16
Original line number Diff line number Diff line
@@ -2468,31 +2468,24 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
			opt2 |= CCTRL_ECN_V(1);
	}

	skb_get(skb);
	rpl = cplhdr(skb);
	if (!is_t4(adapter_type)) {
		BUILD_BUG_ON(sizeof(*rpl5) != roundup(sizeof(*rpl5), 16));
		skb_trim(skb, sizeof(*rpl5));
		rpl5 = (void *)rpl;
		INIT_TP_WR(rpl5, ep->hwtid);
	} else {
		skb_trim(skb, sizeof(*rpl));
		INIT_TP_WR(rpl, ep->hwtid);
	}
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));

	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		u32 isn = (prandom_u32() & ~7UL) - 1;

		skb = get_skb(skb, roundup(sizeof(*rpl5), 16), GFP_KERNEL);
		rpl5 = __skb_put_zero(skb, roundup(sizeof(*rpl5), 16));
		rpl = (void *)rpl5;
		INIT_TP_WR_CPL(rpl5, CPL_PASS_ACCEPT_RPL, ep->hwtid);
		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		opt2 |= T5_ISS_F;
		rpl5 = (void *)rpl;
		memset_after(rpl5, 0, iss);
		if (peer2peer)
			isn += 4;
		rpl5->iss = cpu_to_be32(isn);
		pr_debug("iss %u\n", be32_to_cpu(rpl5->iss));
	} else {
		skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
		rpl = __skb_put_zero(skb, sizeof(*rpl));
		INIT_TP_WR_CPL(rpl, CPL_PASS_ACCEPT_RPL, ep->hwtid);
	}

	rpl->opt0 = cpu_to_be64(opt0);
+1 −1
Original line number Diff line number Diff line
@@ -407,7 +407,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
			     to_erdma_access_flags(reg_wr(send_wr)->access);
		regmr_sge->addr = cpu_to_le64(mr->ibmr.iova);
		regmr_sge->length = cpu_to_le32(mr->ibmr.length);
		regmr_sge->stag = cpu_to_le32(mr->ibmr.lkey);
		regmr_sge->stag = cpu_to_le32(reg_wr(send_wr)->key);
		attrs = FIELD_PREP(ERDMA_SQE_MR_MODE_MASK, 0) |
			FIELD_PREP(ERDMA_SQE_MR_ACCESS_MASK, mr->access) |
			FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
+2 −2
Original line number Diff line number Diff line
@@ -280,7 +280,7 @@ int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
	attr->vendor_id = PCI_VENDOR_ID_ALIBABA;
	attr->vendor_part_id = dev->pdev->device;
	attr->hw_ver = dev->pdev->revision;
	attr->max_qp = dev->attrs.max_qp;
	attr->max_qp = dev->attrs.max_qp - 1;
	attr->max_qp_wr = min(dev->attrs.max_send_wr, dev->attrs.max_recv_wr);
	attr->max_qp_rd_atom = dev->attrs.max_ord;
	attr->max_qp_init_rd_atom = dev->attrs.max_ird;
@@ -291,7 +291,7 @@ int erdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
	attr->max_send_sge = dev->attrs.max_send_sge;
	attr->max_recv_sge = dev->attrs.max_recv_sge;
	attr->max_sge_rd = dev->attrs.max_sge_rd;
	attr->max_cq = dev->attrs.max_cq;
	attr->max_cq = dev->attrs.max_cq - 1;
	attr->max_cqe = dev->attrs.max_cqe;
	attr->max_mr = dev->attrs.max_mr;
	attr->max_pd = dev->attrs.max_pd;
+16 −18
Original line number Diff line number Diff line
@@ -2738,26 +2738,24 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
	int err;
	int port;

	for (port = 1; port <= ARRAY_SIZE(dev->port_caps); port++) {
		dev->port_caps[port - 1].has_smi = false;
		if (MLX5_CAP_GEN(dev->mdev, port_type) ==
		    MLX5_CAP_PORT_TYPE_IB) {
			if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
				err = mlx5_query_hca_vport_context(dev->mdev, 0,
								   port, 0,
	if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
		return 0;

	for (port = 1; port <= dev->num_ports; port++) {
		if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
			dev->port_caps[port - 1].has_smi = true;
			continue;
		}
		err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
						   &vport_ctx);
		if (err) {
			mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
				    port, err);
			return err;
		}
				dev->port_caps[port - 1].has_smi =
					vport_ctx.has_smi;
			} else {
				dev->port_caps[port - 1].has_smi = true;
			}
		}
		dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
	}

	return 0;
}

Loading