Commit 780d8ce7 authored by Linus Torvalds
Browse files
Pull rdma updates from Jason Gunthorpe:
 "Small collection of incremental improvement patches:

   - Minor code cleanup patches, comment improvements, etc from static
     tools

   - Clean up some of the kernel caps, reducing the historical stealth
     uAPI leftovers

   - Bug fixes and minor changes for rdmavt, hns, rxe, irdma

   - Remove unimplemented cruft from rxe

   - Reorganize UMR QP code in mlx5 to avoid going through the IB verbs
     layer

   - flush_workqueue(system_unbound_wq) removal

   - Ensure rxe waits for objects to be unused before allowing the core
     to free them

   - Several rc quality bug fixes for hfi1"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (67 commits)
  RDMA/rtrs-clt: Fix one kernel-doc comment
  RDMA/hfi1: Remove all traces of diagpkt support
  RDMA/hfi1: Consolidate software versions
  RDMA/hfi1: Remove pointless driver version
  RDMA/hfi1: Fix potential integer multiplication overflow errors
  RDMA/hfi1: Prevent panic when SDMA is disabled
  RDMA/hfi1: Prevent use of lock before it is initialized
  RDMA/rxe: Fix an error handling path in rxe_get_mcg()
  IB/core: Fix typo in comment
  RDMA/core: Fix typo in comment
  IB/hf1: Fix typo in comment
  IB/qib: Fix typo in comment
  IB/iser: Fix typo in comment
  RDMA/mlx4: Avoid flush_scheduled_work() usage
  IB/isert: Avoid flush_scheduled_work() usage
  RDMA/mlx5: Remove duplicate pointer assignment in mlx5_ib_alloc_implicit_mr()
  RDMA/qedr: Remove unnecessary synchronize_irq() before free_irq()
  RDMA/hns: Use hr_reg_read() instead of remaining roce_get_xxx()
  RDMA/hns: Use hr_reg_xxx() instead of remaining roce_set_xxx()
  RDMA/irdma: Add SW mechanism to generate completions on error
  ...
parents 090b39af 9c477178
Loading
Loading
Loading
Loading
+14 −10
Original line number Diff line number Diff line
@@ -58,6 +58,7 @@ struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
static struct workqueue_struct *ib_unreg_wq;

/*
 * Each of the three rwsem locks (devices, clients, client_data) protects the
@@ -1602,7 +1603,7 @@ void ib_unregister_device_queued(struct ib_device *ib_dev)
	WARN_ON(!refcount_read(&ib_dev->refcount));
	WARN_ON(!ib_dev->ops.dealloc_driver);
	get_device(&ib_dev->dev);
	if (!queue_work(system_unbound_wq, &ib_dev->unregistration_work))
	if (!queue_work(ib_unreg_wq, &ib_dev->unregistration_work))
		put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device_queued);
@@ -2751,27 +2752,28 @@ static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {

static int __init ib_core_init(void)
{
	int ret;
	int ret = -ENOMEM;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ib_unreg_wq = alloc_workqueue("ib-unreg-wq", WQ_UNBOUND,
				      WQ_UNBOUND_MAX_ACTIVE);
	if (!ib_unreg_wq)
		goto err;

	ib_comp_wq = alloc_workqueue("ib-comp-wq",
			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!ib_comp_wq) {
		ret = -ENOMEM;
		goto err;
	}
	if (!ib_comp_wq)
		goto err_unbound;

	ib_comp_unbound_wq =
		alloc_workqueue("ib-comp-unb-wq",
				WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
				WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
	if (!ib_comp_unbound_wq) {
		ret = -ENOMEM;
	if (!ib_comp_unbound_wq)
		goto err_comp;
	}

	ret = class_register(&ib_class);
	if (ret) {
@@ -2831,6 +2833,8 @@ static int __init ib_core_init(void)
	destroy_workqueue(ib_comp_unbound_wq);
err_comp:
	destroy_workqueue(ib_comp_wq);
err_unbound:
	destroy_workqueue(ib_unreg_wq);
err:
	destroy_workqueue(ib_wq);
	return ret;
@@ -2852,7 +2856,7 @@ static void __exit ib_core_cleanup(void)
	destroy_workqueue(ib_comp_wq);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
	flush_workqueue(system_unbound_wq);
	destroy_workqueue(ib_unreg_wq);
	WARN_ON(!xa_empty(&clients));
	WARN_ON(!xa_empty(&devices));
}
+1 −1
Original line number Diff line number Diff line
@@ -1739,7 +1739,7 @@ static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
	if (!device)
		return -EINVAL;

	if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) {
	if (!(device->attrs.kernel_cap_flags & IBK_ALLOW_USER_UNREG)) {
		ib_device_put(device);
		return -EINVAL;
	}
+8 −8
Original line number Diff line number Diff line
@@ -1034,10 +1034,9 @@ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct netlink_ext_ack *extack)
{
	unsigned long flags;
	struct ib_sa_query *query;
	struct ib_sa_query *query = NULL, *iter;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int found = 0;
	int ret;

	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
@@ -1045,20 +1044,21 @@ int ib_nl_handle_resolve_resp(struct sk_buff *skb,
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(query, &ib_nl_request_list, list) {
	list_for_each_entry(iter, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == query->seq) {
			found = !ib_sa_query_cancelled(query);
			if (found)
				list_del(&query->list);
		if (nlh->nlmsg_seq == iter->seq) {
			if (!ib_sa_query_cancelled(iter)) {
				list_del(&iter->list);
				query = iter;
			}
			break;
		}
	}

	if (!found) {
	if (!query) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}
+1 −1
Original line number Diff line number Diff line
@@ -455,7 +455,7 @@ int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 user_virt,
			break;
		}
	}
	/* upon sucesss lock should stay on hold for the callee */
	/* upon success lock should stay on hold for the callee */
	if (!ret)
		ret = dma_index - start_idx;
	else
+4 −4
Original line number Diff line number Diff line
@@ -281,7 +281,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
	}
	rdma_restrack_add(&pd->res);

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
	if (device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else
		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
@@ -308,7 +308,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,

		pd->__internal_mr = mr;

		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
		if (!(device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY))
			pd->local_dma_lkey = pd->__internal_mr->lkey;

		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
@@ -2131,8 +2131,8 @@ struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
	struct ib_mr *mr;

	if (access_flags & IB_ACCESS_ON_DEMAND) {
		if (!(pd->device->attrs.device_cap_flags &
		      IB_DEVICE_ON_DEMAND_PAGING)) {
		if (!(pd->device->attrs.kernel_cap_flags &
		      IBK_ON_DEMAND_PAGING)) {
			pr_debug("ODP support not available\n");
			return ERR_PTR(-EINVAL);
		}
Loading