Commit 832b2cb9 authored by Chuck Lever's avatar Chuck Lever Committed by J. Bruce Fields
Browse files

svcrdma: Improve DMA mapping trace points



Capture the total size of Sends, the size of DMA map and the
matching DMA unmap to ensure operation is correct.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent c4b77edb
Loading
Loading
Loading
Loading
+23 −7
Original line number Diff line number Diff line
@@ -1498,31 +1498,47 @@ DEFINE_ERROR_EVENT(chunk);
 ** Server-side RDMA API events
 **/

/* Event class shared by the svcrdma DMA map/unmap trace points.
 *
 * Records the DMA address and byte length of each mapping alongside
 * the device name and remote peer address, so a map can be matched
 * against its corresponding unmap when verifying Send operation.
 * (Replaces the old svcrdma_dma_map_page TRACE_EVENT, which captured
 * only the struct page pointer.)
 */
DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
		u64 dma_addr,
		u32 length
	),

	TP_ARGS(rdma, dma_addr, length),

	TP_STRUCT__entry(
		__field(u64, dma_addr)
		__field(u32, length)
		__string(device, rdma->sc_cm_id->device->name)
		__string(addr, rdma->sc_xprt.xpt_remotebuf)
	),

	TP_fast_assign(
		__entry->dma_addr = dma_addr;
		__entry->length = length;
		__assign_str(device, rdma->sc_cm_id->device->name);
		__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
	),

	TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
		__get_str(addr), __get_str(device),
		__entry->dma_addr, __entry->length
	)
);

/* Stamp out one tracepoint per map/unmap direction from the class above. */
#define DEFINE_SVC_DMA_EVENT(name)					\
		DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name,	\
				TP_PROTO(				\
					const struct svcxprt_rdma *rdma,\
					u64 dma_addr,			\
					u32 length			\
				),					\
				TP_ARGS(rdma, dma_addr, length))

DEFINE_SVC_DMA_EVENT(dma_map_page);
DEFINE_SVC_DMA_EVENT(dma_unmap_page);

TRACE_EVENT(svcrdma_dma_map_rwctx,
	TP_PROTO(
		const struct svcxprt_rdma *rdma,
+6 −2
Original line number Diff line number Diff line
@@ -233,11 +233,15 @@ void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++)
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
		trace_svcrdma_dma_unmap_page(rdma,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
	}

	for (i = 0; i < ctxt->sc_page_count; ++i)
		put_page(ctxt->sc_pages[i]);
@@ -490,6 +494,7 @@ static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
	dma_addr_t dma_addr;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

@@ -499,7 +504,6 @@ static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
	return 0;

out_maperr:
	trace_svcrdma_dma_map_page(rdma, page);
	return -EIO;
}