Commit 4a7f6c5a authored by Robert-Ionut Alexa, committed by David S. Miller

net: dpaa2-eth: AF_XDP TX zero copy support

Add support in dpaa2-eth for packet processing on the Tx path using
AF_XDP zero copy mode.

The newly added dpaa2_xsk_tx() function handles enqueuing AF_XDP Tx
packets into the appropriate queue and updates the necessary statistics.
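
At a high level, the new Tx path boils down to the following condensed
sketch (not a complete listing; the real code is in the dpaa2_xsk_tx()
hunk further below):

	batch = xsk_tx_peek_release_desc_batch(ch->xsk_pool, budget);
	for (i = 0; i < batch; i++)	/* one FD per XSK Tx descriptor */
		dpaa2_xsk_tx_build_fd(priv, ch, &fds[i], &xdp_descs[i]);
	/* enqueue all FDs, retrying a bounded number of times on -EBUSY */
	...
	xsk_tx_release(ch->xsk_pool);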

In more detail, the dpaa2_xsk_tx_build_fd() function creates a
Scatter-Gather frame descriptor with only one data buffer. This is
needed because otherwise we would have to impose a headroom requirement
on the Tx buffer in order to store our software annotation structures.
This tactic is already used on the normal data path of the dpaa2-eth
driver, so we reuse the dpaa2_eth_sgt_get()/dpaa2_eth_sgt_recycle()
functions to allocate and recycle the Scatter-Gather table buffers.
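
For reference, the single-buffer SGT frame built by
dpaa2_xsk_tx_build_fd() looks roughly like this (layout inferred from
the code below; widths not to scale):

	sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry)

	+-------------------------------------+--------------------------+
	| software annotation                 | one dpaa2_sg_entry       |
	| (struct dpaa2_eth_swa, type =       | pointing to the XSK Tx   |
	|  DPAA2_ETH_SWA_XSK, xsk.sgt_size)   | data buffer              |
	+-------------------------------------+--------------------------+
	^ sgt_buf                             ^ sgt_buf + priv->tx_data_offset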

If we have reached the maximum number of XSK Tx packets to be sent in
a NAPI cycle, we exit dpaa2_eth_poll() with the entire budget consumed
so that NAPI reschedules us.
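
A NAPI handler that returns its full budget stays scheduled, so no
explicit rescheduling is needed in this case; the check at the top of
dpaa2_eth_poll() relies on exactly that (annotated sketch of the hunk
below):

	if (ch->xsk_zc) {
		work_done_zc = dpaa2_xsk_tx(priv, ch);
		if (work_done_zc) {		/* XSK Tx threshold hit */
			work_done = budget;	/* full budget consumed... */
			goto out;		/* ...so NAPI polls us again */
		}
	}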

On the XSK Tx confirmation path, we just unmap the SGT buffer and
recycle it for further use.
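
Note that completions are batched: dpaa2_eth_free_tx_fd() only
increments ch->xsk_tx_pkts_sent for each confirmed frame, and the total
is handed back to the XSK pool once per poll cycle:

	if (ch->xsk_tx_pkts_sent) {
		xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent);
		ch->xsk_tx_pkts_sent = 0;
	}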

Signed-off-by: Robert-Ionut Alexa <robert-ionut.alexa@nxp.com>
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 48276c08
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c  +39 −9
@@ -858,7 +858,7 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
	}
}

-static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
+void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_sgt_cache *sgt_cache;
	void *sgt_buf = NULL;
@@ -880,7 +880,7 @@ static void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
	return sgt_buf;
}

-static void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
+void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
{
	struct dpaa2_eth_sgt_cache *sgt_cache;

@@ -1115,7 +1115,8 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 */
-static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
+void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
+			  struct dpaa2_eth_channel *ch,
+			  struct dpaa2_eth_fq *fq,
+			  const struct dpaa2_fd *fd, bool in_napi)
{
@@ -1184,6 +1185,10 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,

			if (!swa->tso.is_last_fd)
				should_free_skb = 0;
+		} else if (swa->type == DPAA2_ETH_SWA_XSK) {
+			/* Unmap the SGT Buffer */
+			dma_unmap_single(dev, fd_addr, swa->xsk.sgt_size,
+					 DMA_BIDIRECTIONAL);
		} else {
			skb = swa->single.skb;

@@ -1201,6 +1206,12 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
		return;
	}

+	if (swa->type == DPAA2_ETH_SWA_XSK) {
+		ch->xsk_tx_pkts_sent++;
+		dpaa2_eth_sgt_recycle(priv, buffer_start);
+		return;
+	}
+
	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
		fq->dq_frames++;
		fq->dq_bytes += fd_len;
@@ -1375,7 +1386,7 @@ static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv,
err_sgt_get:
	/* Free all the other FDs that were already fully created */
	for (i = 0; i < index; i++)
-		dpaa2_eth_free_tx_fd(priv, NULL, &fd_start[i], false);
+		dpaa2_eth_free_tx_fd(priv, NULL, NULL, &fd_start[i], false);

	return err;
}
@@ -1491,7 +1502,7 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* Clean up everything, including freeing the skb */
-		dpaa2_eth_free_tx_fd(priv, fq, fd, false);
+		dpaa2_eth_free_tx_fd(priv, NULL, fq, fd, false);
		netdev_tx_completed_queue(nq, 1, fd_len);
	} else {
		percpu_stats->tx_packets += total_enqueued;
@@ -1584,7 +1595,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,

	/* Check frame errors in the FD field */
	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
-	dpaa2_eth_free_tx_fd(priv, fq, fd, true);
+	dpaa2_eth_free_tx_fd(priv, ch, fq, fd, true);

	if (likely(!fd_errors))
		return;
@@ -1929,6 +1940,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
	struct netdev_queue *nq;
	int store_cleaned, work_done;
+	bool work_done_zc = false;
	struct list_head rx_list;
	int retries = 0;
	u16 flowid;
@@ -1941,6 +1953,15 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
	INIT_LIST_HEAD(&rx_list);
	ch->rx_list = &rx_list;

+	if (ch->xsk_zc) {
+		work_done_zc = dpaa2_xsk_tx(priv, ch);
+		/* If we reached the XSK Tx per NAPI threshold, we're done */
+		if (work_done_zc) {
+			work_done = budget;
+			goto out;
+		}
+	}
+
	do {
		err = dpaa2_eth_pull_channel(ch);
		if (unlikely(err))
@@ -1993,6 +2014,11 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
out:
	netif_receive_skb_list(ch->rx_list);

+	if (ch->xsk_tx_pkts_sent) {
+		xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent);
+		ch->xsk_tx_pkts_sent = 0;
+	}
+
	if (txc_fq && txc_fq->dq_frames) {
		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
@@ -2989,6 +3015,10 @@ static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
	/* Update NAPI statistics */
	ch->stats.cdan++;

+	/* NAPI can also be scheduled from the AF_XDP Tx path. Mark a missed
+	 * event so that it can be rescheduled again.
+	 */
+	if (!napi_if_scheduled_mark_missed(&ch->napi))
+		napi_schedule(&ch->napi);
}

drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h  +21 −1
@@ -53,6 +53,12 @@
 */
#define DPAA2_ETH_TXCONF_PER_NAPI	256

+/* Maximum number of Tx frames to be processed in a single NAPI
+ * call when AF_XDP is running. Bind it to DPAA2_ETH_TXCONF_PER_NAPI
+ * to maximize the throughput.
+ */
+#define DPAA2_ETH_TX_ZC_PER_NAPI	DPAA2_ETH_TXCONF_PER_NAPI
+
/* Buffer quota per channel. We want to keep in check number of ingress frames
 * in flight: for small sized frames, congestion group taildrop may kick in
 * first; for large sizes, Rx FQ taildrop threshold will ensure only a
@@ -154,6 +160,7 @@ struct dpaa2_eth_swa {
		} xdp;
		struct {
			struct xdp_buff *xdp_buff;
+			int sgt_size;
		} xsk;
		struct {
			struct sk_buff *skb;
@@ -495,6 +502,7 @@ struct dpaa2_eth_channel {
	int recycled_bufs_cnt;

	bool xsk_zc;
+	int xsk_tx_pkts_sent;
	struct xsk_buff_pool *xsk_pool;
	struct dpaa2_eth_bp *bp;
};
@@ -531,7 +539,7 @@ struct dpaa2_eth_trap_data {

#define DPAA2_ETH_DEFAULT_COPYBREAK	512

-#define DPAA2_ETH_ENQUEUE_MAX_FDS	200
+#define DPAA2_ETH_ENQUEUE_MAX_FDS	256
struct dpaa2_eth_fds {
	struct dpaa2_fd array[DPAA2_ETH_ENQUEUE_MAX_FDS];
};
@@ -836,4 +844,16 @@ void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid);

+void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
+			  struct dpaa2_eth_channel *ch,
+			  struct dpaa2_eth_fq *fq,
+			  const struct dpaa2_fd *fd, bool in_napi);
+bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
+		  struct dpaa2_eth_channel *ch);
+
+/* SGT (Scatter-Gather Table) cache management */
+void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv);
+
+void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf);
+
#endif	/* __DPAA2_H */
drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c  +123 −0
@@ -196,6 +196,7 @@ static int dpaa2_xsk_disable_pool(struct net_device *dev, u16 qid)

	ch->xsk_zc = false;
	ch->xsk_pool = NULL;
+	ch->xsk_tx_pkts_sent = 0;
	ch->bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];

	dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_eth_rx);
@@ -325,3 +326,125 @@ int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)

	return 0;
}

+static int dpaa2_xsk_tx_build_fd(struct dpaa2_eth_priv *priv,
+				 struct dpaa2_eth_channel *ch,
+				 struct dpaa2_fd *fd,
+				 struct xdp_desc *xdp_desc)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	struct dpaa2_sg_entry *sgt;
+	struct dpaa2_eth_swa *swa;
+	void *sgt_buf = NULL;
+	dma_addr_t sgt_addr;
+	int sgt_buf_size;
+	dma_addr_t addr;
+	int err = 0;
+
+	/* Prepare the HW SGT structure */
+	sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
+	sgt_buf = dpaa2_eth_sgt_get(priv);
+	if (unlikely(!sgt_buf))
+		return -ENOMEM;
+	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+	/* Get the address of the XSK Tx buffer */
+	addr = xsk_buff_raw_get_dma(ch->xsk_pool, xdp_desc->addr);
+	xsk_buff_raw_dma_sync_for_device(ch->xsk_pool, addr, xdp_desc->len);
+
+	/* Fill in the HW SGT structure */
+	dpaa2_sg_set_addr(sgt, addr);
+	dpaa2_sg_set_len(sgt, xdp_desc->len);
+	dpaa2_sg_set_final(sgt, true);
+
+	/* Store the necessary info in the SGT buffer */
+	swa = (struct dpaa2_eth_swa *)sgt_buf;
+	swa->type = DPAA2_ETH_SWA_XSK;
+	swa->xsk.sgt_size = sgt_buf_size;
+
+	/* Separately map the SGT buffer */
+	sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(dev, sgt_addr))) {
+		err = -ENOMEM;
+		goto sgt_map_failed;
+	}
+
+	/* Initialize FD fields */
+	memset(fd, 0, sizeof(struct dpaa2_fd));
+	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+	dpaa2_fd_set_addr(fd, sgt_addr);
+	dpaa2_fd_set_len(fd, xdp_desc->len);
+	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+	return 0;
+
+sgt_map_failed:
+	dpaa2_eth_sgt_recycle(priv, sgt_buf);
+
+	return err;
+}
+
+bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
+		  struct dpaa2_eth_channel *ch)
+{
+	struct xdp_desc *xdp_descs = ch->xsk_pool->tx_descs;
+	struct dpaa2_eth_drv_stats *percpu_extras;
+	struct rtnl_link_stats64 *percpu_stats;
+	int budget = DPAA2_ETH_TX_ZC_PER_NAPI;
+	int total_enqueued, enqueued;
+	int retries, max_retries;
+	struct dpaa2_eth_fq *fq;
+	struct dpaa2_fd *fds;
+	int batch, i, err;
+
+	percpu_stats = this_cpu_ptr(priv->percpu_stats);
+	percpu_extras = this_cpu_ptr(priv->percpu_extras);
+	fds = (this_cpu_ptr(priv->fd))->array;
+
+	/* Use the FQ with the same idx as the affine CPU */
+	fq = &priv->fq[ch->nctx.desired_cpu];
+
+	batch = xsk_tx_peek_release_desc_batch(ch->xsk_pool, budget);
+	if (!batch)
+		return false;
+
+	/* Create a FD for each XSK frame to be sent */
+	for (i = 0; i < batch; i++) {
+		err = dpaa2_xsk_tx_build_fd(priv, ch, &fds[i], &xdp_descs[i]);
+		if (err) {
+			batch = i;
+			break;
+		}
+	}
+
+	/* Enqueue all the created FDs */
+	max_retries = batch * DPAA2_ETH_ENQUEUE_RETRIES;
+	total_enqueued = 0;
+	enqueued = 0;
+	retries = 0;
+	while (total_enqueued < batch && retries < max_retries) {
+		err = priv->enqueue(priv, fq, &fds[total_enqueued], 0,
+				    batch - total_enqueued, &enqueued);
+		if (err == -EBUSY) {
+			retries++;
+			continue;
+		}
+
+		total_enqueued += enqueued;
+	}
+	percpu_extras->tx_portal_busy += retries;
+
+	/* Update statistics */
+	percpu_stats->tx_packets += total_enqueued;
+	for (i = 0; i < total_enqueued; i++)
+		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
+	for (i = total_enqueued; i < batch; i++) {
+		dpaa2_eth_free_tx_fd(priv, ch, fq, &fds[i], false);
+		percpu_stats->tx_errors++;
+	}
+
+	xsk_tx_release(ch->xsk_pool);
+
+	return total_enqueued == budget ? true : false;
+}