Commit c5d68d25 authored by Chuck Lever

svcrdma: Clean up allocation of svc_rdma_recv_ctxt
The physical device's favored NUMA node ID is available when
allocating a recv_ctxt. Use that value instead of relying on the
assumption that the memory allocation happens to be running on a
node close to the device.

This cleanup eliminates the hack of destroying recv_ctxts that
were not created by the receive CQ thread -- recv_ctxts are now
always allocated on a "good" node.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
parent fe2b401e
include/linux/sunrpc/svc_rdma.h  +0 −1
@@ -135,7 +135,6 @@ struct svc_rdma_recv_ctxt {
 	struct ib_sge		rc_recv_sge;
 	void			*rc_recv_buf;
 	struct xdr_stream	rc_stream;
-	bool			rc_temp;
 	u32			rc_byte_len;
 	unsigned int		rc_page_count;
 	u32			rc_inv_rkey;
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c  +7 −11
@@ -125,14 +125,15 @@ static void svc_rdma_recv_cid_init(struct svcxprt_rdma *rdma,
 static struct svc_rdma_recv_ctxt *
 svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
 {
+	int node = ibdev_to_node(rdma->sc_cm_id->device);
 	struct svc_rdma_recv_ctxt *ctxt;
 	dma_addr_t addr;
 	void *buffer;
 
-	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
+	ctxt = kmalloc_node(sizeof(*ctxt), GFP_KERNEL, node);
 	if (!ctxt)
 		goto fail0;
-	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
+	buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
 	if (!buffer)
 		goto fail1;
 	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
@@ -155,7 +156,6 @@ svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
 	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
 	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
 	ctxt->rc_recv_buf = buffer;
-	ctxt->rc_temp = false;
 	return ctxt;
 
 fail2:
@@ -232,10 +232,7 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
 	pcl_free(&ctxt->rc_write_pcl);
 	pcl_free(&ctxt->rc_reply_pcl);
 
-	if (!ctxt->rc_temp)
-		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
-	else
-		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
+	llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
 }
 
 /**
@@ -258,7 +255,7 @@ void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *vctxt)
 }
 
 static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
-				   unsigned int wanted, bool temp)
+				   unsigned int wanted)
 {
 	const struct ib_recv_wr *bad_wr = NULL;
 	struct svc_rdma_recv_ctxt *ctxt;
@@ -275,7 +272,6 @@ static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
 			break;
 
 		trace_svcrdma_post_recv(ctxt);
-		ctxt->rc_temp = temp;
 		ctxt->rc_recv_wr.next = recv_chain;
 		recv_chain = &ctxt->rc_recv_wr;
 		rdma->sc_pending_recvs++;
@@ -309,7 +305,7 @@ static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
  */
 bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
 {
-	return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests, true);
+	return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests);
 }
 
 /**
@@ -343,7 +339,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 	 * client reconnects.
 	 */
 	if (rdma->sc_pending_recvs < rdma->sc_max_requests)
-		if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch, false))
+		if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch))
 			goto dropped;
 
 	/* All wc fields are now known to be valid */