Commit c41ced02 authored by Lorenzo Bianconi, committed by Alexei Starovoitov

net: mvneta: add frags support to XDP_TX



Introduce the capability to map non-linear XDP buffers in
mvneta_xdp_submit_frame() for XDP_TX and XDP_REDIRECT.
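[Editor's note: in a multi-buffer xdp_frame, the linear area is followed by up to MAX_SKB_FRAGS page fragments described by the skb_shared_info stored at the end of the buffer. As a rough illustration of what the reworked mvneta_xdp_submit_frame() below has to account for, here is a minimal sketch, not part of this commit; xdp_frame_count_descs() is a hypothetical helper:

#include <net/xdp.h>
#include <linux/skbuff.h>

/* Hypothetical helper: one TX descriptor for the linear area plus one
 * per fragment; *bytes returns the total payload length, i.e. what the
 * new nxmit_byte out-parameter accumulates across descriptors.
 */
static int xdp_frame_count_descs(struct xdp_frame *xdpf, unsigned int *bytes)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	int i, num_descs = 1;

	*bytes = xdpf->len;	/* linear area */
	if (unlikely(xdp_frame_has_frags(xdpf))) {
		num_descs += sinfo->nr_frags;
		for (i = 0; i < sinfo->nr_frags; i++)
			*bytes += skb_frag_size(&sinfo->frags[i]);
	}

	return num_descs;
}
]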

Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://lore.kernel.org/r/5d46ab63870ffe96fb95e6075a7ff0c81ef6424d.1642758637.git.lorenzo@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 7c48cb01
drivers/net/ethernet/marvell/mvneta.c: +76 −36

--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1884,8 +1884,8 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
 			bytes_compl += buf->skb->len;
 			pkts_compl++;
 			dev_kfree_skb_any(buf->skb);
-		} else if (buf->type == MVNETA_TYPE_XDP_TX ||
-			   buf->type == MVNETA_TYPE_XDP_NDO) {
+		} else if ((buf->type == MVNETA_TYPE_XDP_TX ||
+			    buf->type == MVNETA_TYPE_XDP_NDO) && buf->xdpf) {
 			if (napi && buf->type == MVNETA_TYPE_XDP_TX)
 				xdp_return_frame_rx_napi(buf->xdpf);
 			else
@@ -2079,47 +2079,87 @@ mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,

 static int
 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
-			struct xdp_frame *xdpf, bool dma_map)
+			struct xdp_frame *xdpf, int *nxmit_byte, bool dma_map)
 {
-	struct mvneta_tx_desc *tx_desc;
-	struct mvneta_tx_buf *buf;
-	dma_addr_t dma_addr;
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+	struct device *dev = pp->dev->dev.parent;
+	struct mvneta_tx_desc *tx_desc = NULL;
+	int i, num_frames = 1;
+	struct page *page;
 
-	if (txq->count >= txq->tx_stop_threshold)
+	if (unlikely(xdp_frame_has_frags(xdpf)))
+		num_frames += sinfo->nr_frags;
+
+	if (txq->count + num_frames >= txq->size)
 		return MVNETA_XDP_DROPPED;
 
-	tx_desc = mvneta_txq_next_desc_get(txq);
+	for (i = 0; i < num_frames; i++) {
+		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+		skb_frag_t *frag = NULL;
+		int len = xdpf->len;
+		dma_addr_t dma_addr;
 
-	buf = &txq->buf[txq->txq_put_index];
-	if (dma_map) {
-		/* ndo_xdp_xmit */
-		dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
-					  xdpf->len, DMA_TO_DEVICE);
-		if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
-			mvneta_txq_desc_put(txq);
-			return MVNETA_XDP_DROPPED;
+		if (unlikely(i)) { /* paged area */
+			frag = &sinfo->frags[i - 1];
+			len = skb_frag_size(frag);
 		}
-		buf->type = MVNETA_TYPE_XDP_NDO;
-	} else {
-		struct page *page = virt_to_page(xdpf->data);
 
-		dma_addr = page_pool_get_dma_addr(page) +
-			   sizeof(*xdpf) + xdpf->headroom;
-		dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
-					   xdpf->len, DMA_BIDIRECTIONAL);
-		buf->type = MVNETA_TYPE_XDP_TX;
-	}
-	buf->xdpf = xdpf;
+		tx_desc = mvneta_txq_next_desc_get(txq);
+		if (dma_map) {
+			/* ndo_xdp_xmit */
+			void *data;
+
+			data = unlikely(frag) ? skb_frag_address(frag)
+					      : xdpf->data;
+			dma_addr = dma_map_single(dev, data, len,
+						  DMA_TO_DEVICE);
+			if (dma_mapping_error(dev, dma_addr)) {
+				mvneta_txq_desc_put(txq);
+				goto unmap;
+			}
+
+			buf->type = MVNETA_TYPE_XDP_NDO;
+		} else {
+			page = unlikely(frag) ? skb_frag_page(frag)
+					      : virt_to_page(xdpf->data);
+			dma_addr = page_pool_get_dma_addr(page);
+			if (unlikely(frag))
+				dma_addr += skb_frag_off(frag);
+			else
+				dma_addr += sizeof(*xdpf) + xdpf->headroom;
+			dma_sync_single_for_device(dev, dma_addr, len,
+						   DMA_BIDIRECTIONAL);
+			buf->type = MVNETA_TYPE_XDP_TX;
+		}
+		buf->xdpf = unlikely(i) ? NULL : xdpf;
 
-	tx_desc->command = MVNETA_TXD_FLZ_DESC;
-	tx_desc->buf_phys_addr = dma_addr;
-	tx_desc->data_size = xdpf->len;
+		tx_desc->command = unlikely(i) ? 0 : MVNETA_TXD_F_DESC;
+		tx_desc->buf_phys_addr = dma_addr;
+		tx_desc->data_size = len;
+		*nxmit_byte += len;
 
-	mvneta_txq_inc_put(txq);
-	txq->pending++;
-	txq->count++;
+		mvneta_txq_inc_put(txq);
+	}
+
+	/*last descriptor */
+	if (likely(tx_desc))
+		tx_desc->command |= MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
+
+	txq->pending += num_frames;
+	txq->count += num_frames;
 
 	return MVNETA_XDP_TX;
+
+unmap:
+	for (i--; i >= 0; i--) {
+		mvneta_txq_desc_put(txq);
+		tx_desc = txq->descs + txq->next_desc_to_proc;
+		dma_unmap_single(dev, tx_desc->buf_phys_addr,
+				 tx_desc->data_size,
+				 DMA_TO_DEVICE);
+	}
+
+	return MVNETA_XDP_DROPPED;
 }
 
 static int
@@ -2128,8 +2168,8 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
 	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
 	struct mvneta_tx_queue *txq;
 	struct netdev_queue *nq;
+	int cpu, nxmit_byte = 0;
 	struct xdp_frame *xdpf;
-	int cpu;
 	u32 ret;
 
 	xdpf = xdp_convert_buff_to_frame(xdp);
@@ -2141,10 +2181,10 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
 	nq = netdev_get_tx_queue(pp->dev, txq->id);
 
 	__netif_tx_lock(nq, cpu);
-	ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
+	ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false);
 	if (ret == MVNETA_XDP_TX) {
 		u64_stats_update_begin(&stats->syncp);
-		stats->es.ps.tx_bytes += xdpf->len;
+		stats->es.ps.tx_bytes += nxmit_byte;
 		stats->es.ps.tx_packets++;
 		stats->es.ps.xdp_tx++;
 		u64_stats_update_end(&stats->syncp);
@@ -2183,11 +2223,11 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,

 	__netif_tx_lock(nq, cpu);
 	for (i = 0; i < num_frame; i++) {
-		ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
+		ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte,
+					      true);
 		if (ret != MVNETA_XDP_TX)
 			break;
 
-		nxmit_byte += frames[i]->len;
 		nxmit++;
 	}
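
[Editor's note on the descriptor flagging above: the old single-buffer path stamped every descriptor with MVNETA_TXD_FLZ_DESC, which is simply MVNETA_TXD_F_DESC | MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD OR'ed together. With frags, only the first descriptor of a frame carries F and only the last carries L | Z_PAD; the loop achieves this by setting F on descriptor 0 and OR-ing L | Z_PAD into the last descriptor tx_desc pointed at. The same rule as a standalone sketch, with mvneta_xdp_desc_command() a hypothetical helper assuming mvneta.c's MVNETA_TXD_* defines are in scope:

#include <linux/types.h>

/* Hypothetical helper expressing the first/last flag rule used above:
 * descriptor 0 of a frame gets F (first); the final descriptor gets
 * L (last) plus Z_PAD; middle fragment descriptors carry no flags.
 */
static u32 mvneta_xdp_desc_command(int i, int num_frames)
{
	u32 command = 0;

	if (i == 0)
		command |= MVNETA_TXD_F_DESC;
	if (i == num_frames - 1)
		command |= MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;

	return command;	/* equals MVNETA_TXD_FLZ_DESC when num_frames == 1 */
}
]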