Commit c741e491 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull rdma fixes from Jason Gunthorpe:
 "Quite a few small bug fixes old and new, also Doug Ledford is retiring
  now, we thank him for his work. Details:

   - Use after free in rxe

   - mlx5 DM regression

   - hns bugs triggered by device reset

   - Two fixes for CONFIG_DEBUG_PREEMPT

   - Several longstanding corner case bugs in hfi1

   - Two irdma data path bugs in rare cases and some memory issues"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/irdma: Don't arm the CQ more than two times if no CE for this CQ
  RDMA/irdma: Report correct WC errors
  RDMA/irdma: Fix a potential memory allocation issue in 'irdma_prm_add_pble_mem()'
  RDMA/irdma: Fix a use-after-free in add_pble_prm
  IB/hfi1: Fix leak of rcvhdrtail_dummy_kvaddr
  IB/hfi1: Fix early init panic
  IB/hfi1: Insure use of smp_processor_id() is preempt disabled
  IB/hfi1: Correct guard on eager buffer deallocation
  RDMA/rtrs: Call {get,put}_cpu_ptr to silence a debug kernel warning
  RDMA/hns: Do not destroy QP resources in the hw resetting phase
  RDMA/hns: Do not halt commands during reset until later
  Remove Doug Ledford from MAINTAINERS
  RDMA/mlx5: Fix releasing unallocated memory in dereg MR flow
  RDMA: Fix use-after-free in rxe_queue_cleanup
parents ded746bf 10467ce0
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -9329,7 +9329,6 @@ S: Maintained
F:	drivers/iio/pressure/dps310.c
INFINIBAND SUBSYSTEM
M:	Doug Ledford <dledford@redhat.com>
M:	Jason Gunthorpe <jgg@nvidia.com>
L:	linux-rdma@vger.kernel.org
S:	Supported
+2 −0
Original line number Diff line number Diff line
@@ -8415,6 +8415,8 @@ static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
 */
static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
{
	if (!rcd->rcvhdrq)
		return;
	clear_recv_intr(rcd);
	if (check_packet_present(rcd))
		force_recv_intr(rcd);
+2 −0
Original line number Diff line number Diff line
@@ -1012,6 +1012,8 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
	struct hfi1_packet packet;
	int skip_pkt = 0;

	if (!rcd->rcvhdrq)
		return RCV_PKT_OK;
	/* Control context will always use the slow path interrupt handler */
	needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;

+17 −23
Original line number Diff line number Diff line
@@ -113,7 +113,6 @@ static int hfi1_create_kctxt(struct hfi1_devdata *dd,
	rcd->fast_handler = get_dma_rtail_setting(rcd) ?
				handle_receive_interrupt_dma_rtail :
				handle_receive_interrupt_nodma_rtail;
	rcd->slow_handler = handle_receive_interrupt;

	hfi1_set_seq_cnt(rcd, 1);

@@ -334,6 +333,8 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
		rcd->numa_id = numa;
		rcd->rcv_array_groups = dd->rcv_entries.ngroups;
		rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
		rcd->slow_handler = handle_receive_interrupt;
		rcd->do_interrupt = rcd->slow_handler;
		rcd->msix_intr = CCE_NUM_MSIX_VECTORS;

		mutex_init(&rcd->exp_mutex);
@@ -874,18 +875,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
	if (ret)
		goto done;

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
							 sizeof(u64),
							 &dd->rcvhdrtail_dummy_dma,
							 GFP_KERNEL);

	if (!dd->rcvhdrtail_dummy_kvaddr) {
		dd_dev_err(dd, "cannot allocate dummy tail memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/* dd->rcd can be NULL if early initialization failed */
	for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
		/*
@@ -898,8 +887,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
		if (!rcd)
			continue;

		rcd->do_interrupt = &handle_receive_interrupt;

		lastfail = hfi1_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = hfi1_setup_eagerbufs(rcd);
@@ -1120,7 +1107,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
	rcd->egrbufs.rcvtids = NULL;

	for (e = 0; e < rcd->egrbufs.alloced; e++) {
		if (rcd->egrbufs.buffers[e].dma)
		if (rcd->egrbufs.buffers[e].addr)
			dma_free_coherent(&dd->pcidev->dev,
					  rcd->egrbufs.buffers[e].len,
					  rcd->egrbufs.buffers[e].addr,
@@ -1201,6 +1188,11 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
	dd->tx_opstats    = NULL;
	kfree(dd->comp_vect);
	dd->comp_vect = NULL;
	if (dd->rcvhdrtail_dummy_kvaddr)
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  (void *)dd->rcvhdrtail_dummy_kvaddr,
				  dd->rcvhdrtail_dummy_dma);
	dd->rcvhdrtail_dummy_kvaddr = NULL;
	sdma_clean(dd, dd->num_sdma);
	rvt_dealloc_device(&dd->verbs_dev.rdi);
}
@@ -1298,6 +1290,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
		goto bail;
	}

	/* allocate dummy tail memory for all receive contexts */
	dd->rcvhdrtail_dummy_kvaddr =
		dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64),
				   &dd->rcvhdrtail_dummy_dma, GFP_KERNEL);
	if (!dd->rcvhdrtail_dummy_kvaddr) {
		ret = -ENOMEM;
		goto bail;
	}

	atomic_set(&dd->ipoib_rsm_usr_num, 0);
	return dd;

@@ -1505,13 +1506,6 @@ static void cleanup_device_data(struct hfi1_devdata *dd)

	free_credit_return(dd);

	if (dd->rcvhdrtail_dummy_kvaddr) {
		dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
				  (void *)dd->rcvhdrtail_dummy_kvaddr,
				  dd->rcvhdrtail_dummy_dma);
		dd->rcvhdrtail_dummy_kvaddr = NULL;
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we do for ctxtcnt, because that's what we allocate.
+1 −1
Original line number Diff line number Diff line
@@ -838,8 +838,8 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
	if (current->nr_cpus_allowed != 1)
		goto out;

	cpu_id = smp_processor_id();
	rcu_read_lock();
	cpu_id = smp_processor_id();
	rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id,
				     sdma_rht_params);

Loading