Commit 3672ac8a authored by Linus Torvalds
Browse files
Pull rdma updates from Jason Gunthorpe:
 "This is quite a small cycle, if not for Lee's 70 patches cleaning the
  kdocs it would be well below typical for patch count.

  Most of the interesting work here was in the HNS and rxe drivers which
  got fairly major internal changes.

  Summary:

   - Driver updates and bug fixes: siw, hns, bnxt_re, mlx5, efa

   - Significant rework in rxe to get it ready to have XRC support added

   - Several rtrs bug fixes

   - Big series to get to 'make W=1' cleanness, primarily updating kdocs

   - Support for creating a RDMA MR from a DMABUF fd to allow PCI peer
     to peer transfers to GPU VRAM

   - Device disassociation now works properly with umad

   - Work to support more than 255 ports on a RDMA device

   - Further support for the new HNS HIP09 hardware

   - Coding style cleanups: comma to semicolon, unneeded semicolon/blank
     lines, remove 'h' printk format, don't check for NULL before kfree,
     use true/false for bool"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (205 commits)
  RDMA/rtrs-srv: Do not pass a valid pointer to PTR_ERR()
  RDMA/srp: Fix support for unpopulated and unbalanced NUMA nodes
  RDMA/mlx5: Fail QP creation if the device can not support the CQE TS
  RDMA/mlx5: Allow CQ creation without attached EQs
  RDMA/rtrs-srv-sysfs: fix missing put_device
  RDMA/rtrs-srv: fix memory leak by missing kobject free
  RDMA/rtrs: Only allow addition of path to an already established session
  RDMA/rtrs-srv: Fix stack-out-of-bounds
  RDMA/rxe: Remove unused pkt->offset
  RDMA/ucma: Fix use-after-free bug in ucma_create_uevent
  RDMA/core: Fix kernel doc warnings for ib_port_immutable_read()
  RDMA/qedr: Use true and false for bool variable
  RDMA/hns: Adjust definition of FRMR fields
  RDMA/hns: Refactor process of posting CMDQ
  RDMA/hns: Adjust fields and variables about CMDQ tail/head
  RDMA/hns: Remove redundant operations on CMDQ
  RDMA/hns: Fixes missing error code of CMDQ
  RDMA/hns: Remove unused member and variable of CMDQ
  RDMA/ipoib: Remove racy Subnet Manager sendonly join checks
  RDMA/mlx5: Support 400Gbps IB rate in mlx5 driver
  ...
parents bdb39c95 7289e26f
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -41,6 +41,7 @@ config INFINIBAND_USER_MEM
	bool
	depends on INFINIBAND_USER_ACCESS != n
	depends on MMU
	select DMA_SHARED_BUFFER
	default y

config INFINIBAND_ON_DEMAND_PAGING
+1 −1
Original line number Diff line number Diff line
@@ -40,5 +40,5 @@ ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o \
				uverbs_std_types_srq.o \
				uverbs_std_types_wq.o \
				uverbs_std_types_qp.o
ib_uverbs-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_uverbs-$(CONFIG_INFINIBAND_USER_MEM) += umem.o umem_dmabuf.o
ib_uverbs-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
+4 −5
Original line number Diff line number Diff line
@@ -669,11 +669,10 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
 * rdma_find_gid_by_port - Returns the GID entry attributes when it finds
 * a valid GID entry for given search parameters. It searches for the specified
 * GID value in the local software cache.
 * @device: The device to query.
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: The GID type to search for.
 * @port_num: The port number of the device where the GID value should be
 *   searched.
 * @port: The port number of the device where the GID value should be searched.
 * @ndev: In RoCE, the net device of the device. NULL means ignore.
 *
 * Returns sgid attributes if the GID is found with valid reference or
@@ -719,7 +718,7 @@ EXPORT_SYMBOL(rdma_find_gid_by_port);
/**
 * rdma_find_gid_by_filter - Returns the GID table attribute where a
 * specified GID value occurs
 * @device: The device to query.
 * @ib_dev: The device to query.
 * @gid: The GID value to search for.
 * @port: The port number of the device where the GID value could be
 *   searched.
@@ -728,6 +727,7 @@ EXPORT_SYMBOL(rdma_find_gid_by_port);
 *   otherwise, we continue searching the GID table. It's guaranteed that
 *   while filter is executed, ndev field is valid and the structure won't
 *   change. filter is executed in an atomic context. filter must not be NULL.
 * @context: Private data to pass into the call-back.
 *
 * rdma_find_gid_by_filter() searches for the specified GID value
 * of which the filter function returns true in the port's GID table.
@@ -1253,7 +1253,6 @@ EXPORT_SYMBOL(rdma_get_gid_attr);
 * @entries: Entries where GID entries are returned.
 * @max_entries: Maximum number of entries that can be returned.
 * Entries array must be allocated to hold max_entries number of entries.
 * @num_entries: Updated to the number of entries that were successfully read.
 *
 * Returns number of entries on success or appropriate error code.
 */
+4 −4
Original line number Diff line number Diff line
@@ -4333,7 +4333,7 @@ static int cm_add_one(struct ib_device *ib_device)
	unsigned long flags;
	int ret;
	int count = 0;
	u8 i;
	unsigned int i;

	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
			 GFP_KERNEL);
@@ -4345,7 +4345,7 @@ static int cm_add_one(struct ib_device *ib_device)
	cm_dev->going_down = 0;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
	rdma_for_each_port (ib_device, i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

@@ -4431,7 +4431,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;
	unsigned int i;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
@@ -4441,7 +4441,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
	rdma_for_each_port (ib_device, i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

+41 −40
Original line number Diff line number Diff line
@@ -352,7 +352,13 @@ struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)

struct cma_multicast {
	struct rdma_id_private *id_priv;
	union {
		struct ib_sa_multicast *sa_mc;
		struct {
			struct work_struct work;
			struct rdma_cm_event event;
		} iboe_join;
	};
	struct list_head	list;
	void			*context;
	struct sockaddr_storage	addr;
@@ -1823,6 +1829,8 @@ static void destroy_mc(struct rdma_id_private *id_priv,
			cma_igmp_send(ndev, &mgid, false);
			dev_put(ndev);
		}

		cancel_work_sync(&mc->iboe_join.work);
	}
	kfree(mc);
}
@@ -2683,6 +2691,28 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv,
	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

/*
 * Deferred delivery of an iboe (RoCE) multicast join event.
 *
 * The join event is prepared and queued by cma_iboe_join_multicast()
 * (INIT_WORK/queue_work on mc->iboe_join.work); this handler runs from
 * the cma_wq workqueue and delivers it to the CM ID's event handler
 * while holding handler_mutex.
 */
static void cma_iboe_join_work_handler(struct work_struct *work)
{
	struct cma_multicast *mc =
		container_of(work, struct cma_multicast, iboe_join.work);
	struct rdma_cm_event *event = &mc->iboe_join.event;
	struct rdma_id_private *id_priv = mc->id_priv;
	int ret;

	mutex_lock(&id_priv->handler_mutex);
	/*
	 * If the ID is being torn down (destroy or device removal), drop
	 * the event rather than delivering it to a dying handler.
	 */
	if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
	    READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
		goto out_unlock;

	ret = cma_cm_event_handler(id_priv, event);
	WARN_ON(ret);

out_unlock:
	mutex_unlock(&id_priv->handler_mutex);
	/*
	 * A MULTICAST_JOIN event carries an AH attribute (set up via
	 * cma_make_mc_event()); release it whether or not the event was
	 * delivered, mirroring cma_ib_mc_handler().
	 */
	if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN)
		rdma_destroy_ah_attr(&event->param.ud.ah_attr);
}

static void cma_work_handler(struct work_struct *_work)
{
	struct cma_work *work = container_of(_work, struct cma_work, work);
@@ -4478,10 +4508,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
	cma_make_mc_event(status, id_priv, multicast, &event, mc);
	ret = cma_cm_event_handler(id_priv, &event);
	rdma_destroy_ah_attr(&event.param.ud.ah_attr);
	if (ret) {
		destroy_id_handler_unlock(id_priv);
		return 0;
	}
	WARN_ON(ret);

out:
	mutex_unlock(&id_priv->handler_mutex);
@@ -4542,17 +4569,6 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
	rec.join_state = mc->join_state;

	if ((rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) &&
	    (!ib_sa_sendonly_fullmem_support(&sa_client,
					     id_priv->id.device,
					     id_priv->id.port_num))) {
		dev_warn(
			&id_priv->id.device->dev,
			"RDMA CM: port %u Unable to multicast join: SM doesn't support Send Only Full Member option\n",
			id_priv->id.port_num);
		return -EOPNOTSUPP;
	}

	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
		    IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
@@ -4604,7 +4620,6 @@ static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
				   struct cma_multicast *mc)
{
	struct cma_work *work;
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	int err = 0;
	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
@@ -4618,10 +4633,6 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
	if (cma_zero_addr(addr))
		return -EINVAL;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
		   rdma_start_port(id_priv->cma_dev->device)];
	cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
@@ -4632,10 +4643,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,

	if (dev_addr->bound_dev_if)
		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
	if (!ndev) {
		err = -ENODEV;
		goto err_free;
	}
	if (!ndev)
		return -ENODEV;

	ib.rec.rate = iboe_get_rate(ndev);
	ib.rec.hop_limit = 1;
	ib.rec.mtu = iboe_get_mtu(ndev->mtu);
@@ -4653,24 +4663,15 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
			err = -ENOTSUPP;
	}
	dev_put(ndev);
	if (err || !ib.rec.mtu) {
		if (!err)
			err = -EINVAL;
		goto err_free;
	}
	if (err || !ib.rec.mtu)
		return err ?: -EINVAL;

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
		    &ib.rec.port_gid);
	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler);
	cma_make_mc_event(0, id_priv, &ib, &work->event, mc);
	/* Balances with cma_id_put() in cma_work_handler */
	cma_id_get(id_priv);
	queue_work(cma_wq, &work->work);
	INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
	cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc);
	queue_work(cma_wq, &mc->iboe_join.work);
	return 0;

err_free:
	kfree(work);
	return err;
}

int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
Loading