Commit f3ed4de6 authored by Linus Torvalds
Browse files
Pull rdma fixes from Jason Gunthorpe:
 "Nothing special here, though Bob's regression fixes for rxe would have
  made it before the rc cycle had there not been such strong winter
  weather!

   - Fix corner cases in the rxe reference counting cleanup that are
     causing regressions in blktests for SRP

   - Two kdoc fixes so W=1 is clean

   - Missing error return in error unwind for mlx5

   - Wrong lock type nesting in IB CM"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/rxe: Fix errant WARN_ONCE in rxe_completer()
  RDMA/rxe: Fix extra deref in rxe_rcv_mcast_pkt()
  RDMA/rxe: Fix missed IB reference counting in loopback
  RDMA/uverbs: Fix kernel-doc warning of _uverbs_alloc
  RDMA/mlx5: Set correct kernel-doc identifier
  IB/mlx5: Add missing error code
  RDMA/rxe: Fix missing kconfig dependency on CRYPTO
  RDMA/cm: Fix IRQ restore in ib_send_cm_sidr_rep
parents de5bd6c5 545c4ab4
Loading
Loading
Loading
Loading
drivers/infiniband/core/cm.c (+3 −2)
@@ -3651,6 +3651,7 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
 				   struct ib_cm_sidr_rep_param *param)
 {
 	struct ib_mad_send_buf *msg;
+	unsigned long flags;
 	int ret;
 
 	lockdep_assert_held(&cm_id_priv->lock);
@@ -3676,12 +3677,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
 		return ret;
 	}
 	cm_id_priv->id.state = IB_CM_IDLE;
-	spin_lock_irq(&cm.lock);
+	spin_lock_irqsave(&cm.lock, flags);
 	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
 		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
 		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
 	}
-	spin_unlock_irq(&cm.lock);
+	spin_unlock_irqrestore(&cm.lock, flags);
 	return 0;
 }
 


drivers/infiniband/core/uverbs_ioctl.c (+1 −1)
@@ -91,7 +91,7 @@ void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
 }
 
 /**
- * uverbs_alloc() - Quickly allocate memory for use with a bundle
+ * _uverbs_alloc() - Quickly allocate memory for use with a bundle
  * @bundle: The bundle
  * @size: Number of bytes to allocate
  * @flags: Allocator flags
drivers/infiniband/hw/mlx5/devx.c (+3 −1)
@@ -2073,8 +2073,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
 
 		num_alloc_xa_entries++;
 		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
-		if (!event_sub)
+		if (!event_sub) {
+			err = -ENOMEM;
 			goto err;
+		}
 
 		list_add_tail(&event_sub->event_list, &sub_list);
 		uverbs_uobject_get(&ev_file->uobj);
drivers/infiniband/hw/mlx5/odp.c (+1 −1)
@@ -1082,7 +1082,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
 	return ret ? ret : npages;
 }
 
-/**
+/*
  * Parse a series of data segments for page fault handling.
  *
  * @dev:  Pointer to mlx5 IB device
drivers/infiniband/sw/rxe/Kconfig (+1 −0)
@@ -4,6 +4,7 @@ config RDMA_RXE
 	depends on INET && PCI && INFINIBAND
 	depends on INFINIBAND_VIRT_DMA
 	select NET_UDP_TUNNEL
+	select CRYPTO
 	select CRYPTO_CRC32
 	help
 	This driver implements the InfiniBand RDMA transport over
Loading