Commit a318c70a authored by Shay Agroskin, committed by Jakub Kicinski

net: ena: introduce XDP redirect implementation



This patch adds partial support for the XDP_REDIRECT directive, which
instructs the driver to pass the packet to an interface specified by the
program. The directive is passed to the driver when the eBPF program
calls the bpf_redirect() or bpf_redirect_map() helper.
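As an illustration (not part of this patch), a minimal XDP program that
exercises this path could look as follows; the map name tx_port and the
single-entry layout are hypothetical:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* Hypothetical devmap holding the target interface's ifindex */
	struct {
		__uint(type, BPF_MAP_TYPE_DEVMAP);
		__uint(max_entries, 1);
		__type(key, __u32);
		__type(value, __u32);
	} tx_port SEC(".maps");

	SEC("xdp")
	int xdp_redirect_prog(struct xdp_md *ctx)
	{
		/* Returns XDP_REDIRECT on success; the driver then hands
		 * the frame to the interface stored at tx_port[0].
		 */
		return bpf_redirect_map(&tx_port, 0, 0);
	}

	char _license[] SEC("license") = "GPL";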

To lay the groundwork for integration with the existing XDP TX
implementation, the patch removes the redundant page refcount increase
in ena_xdp_xmit_frame() and the matching decrease in ena_clean_rx_irq().
Instead, it only DMA-unmaps the RX buffers for which an XDP TX or
REDIRECT verdict was received.
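The resulting ownership model can be summarized as follows (a simplified
sketch, not the literal driver code):

	/* RX allocation:   page is allocated and DMA-mapped; its refcount is 1.
	 *
	 * XDP TX/REDIRECT: ena_unmap_rx_buff(rx_ring, rx_info) only DMA-unmaps
	 *                  the buffer, and rx_info->page is set to NULL, so the
	 *                  RX ring gives up its claim. The single page reference
	 *                  travels with the xdp_frame and is released on TX
	 *                  completion (or by the target device on redirect),
	 *                  with no extra page_ref_inc()/__free_page() pair needed.
	 */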

The XDP REDIRECT support is still missing an implementation of the
.ndo_xdp_xmit callback, which allows redirecting packets to an ENA
interface; it will be added in a later patch.
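For reference, the missing callback has the following shape in struct
net_device_ops (kernel API at the time of this series); it transmits up
to n XDP frames on dev and returns the number actually sent:

	int (*ndo_xdp_xmit)(struct net_device *dev, int n,
			    struct xdp_frame **xdp, u32 flags);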

Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent e8223eef
drivers/net/ethernet/amazon/ena/ena_ethtool.c  +1 −0
@@ -95,6 +95,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 	ENA_STAT_RX_ENTRY(xdp_pass),
 	ENA_STAT_RX_ENTRY(xdp_tx),
 	ENA_STAT_RX_ENTRY(xdp_invalid),
+	ENA_STAT_RX_ENTRY(xdp_redirect),
 };
 
 static const struct ena_stats ena_stats_ena_com_strings[] = {
drivers/net/ethernet/amazon/ena/ena_netdev.c  +51 −32
@@ -289,21 +289,17 @@ static int ena_xdp_xmit_frame(struct net_device *dev,
 	struct ena_com_tx_ctx ena_tx_ctx = {};
 	struct ena_tx_buffer *tx_info;
 	struct ena_ring *xdp_ring;
-	struct page *rx_buff_page;
 	u16 next_to_use, req_id;
 	int rc;
 	void *push_hdr;
 	u32 push_len;
 
-	rx_buff_page = virt_to_page(xdpf->data);
-
 	xdp_ring = &adapter->tx_ring[qid];
 	next_to_use = xdp_ring->next_to_use;
 	req_id = xdp_ring->free_ids[next_to_use];
 	tx_info = &xdp_ring->tx_buffer_info[req_id];
 	tx_info->num_of_bufs = 0;
-	page_ref_inc(rx_buff_page);
-	tx_info->xdp_rx_page = rx_buff_page;
+	tx_info->xdp_rx_page = virt_to_page(xdpf->data);
 
 	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
 	if (unlikely(rc))
@@ -335,7 +331,7 @@ static int ena_xdp_xmit_frame(struct net_device *dev,
 	ena_unmap_tx_buff(xdp_ring, tx_info);
 	tx_info->xdpf = NULL;
 error_drop_packet:
-	__free_page(tx_info->xdp_rx_page);
+	xdp_return_frame(xdpf);
 	return NETDEV_TX_OK;
 }
 
@@ -354,25 +350,36 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 
 	verdict = bpf_prog_run_xdp(xdp_prog, xdp);
 
-	if (verdict == XDP_TX) {
+	switch (verdict) {
+	case XDP_TX:
 		xdpf = xdp_convert_buff_to_frame(xdp);
 		if (unlikely(!xdpf)) {
 			trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
 			xdp_stat = &rx_ring->rx_stats.xdp_aborted;
-		} else {
-			ena_xdp_xmit_frame(rx_ring->netdev, xdpf,
-					   rx_ring->qid + rx_ring->adapter->num_io_queues);
+			break;
+		}
 
-			xdp_stat = &rx_ring->rx_stats.xdp_tx;
-		}
-	} else if (unlikely(verdict == XDP_ABORTED)) {
+		ena_xdp_xmit_frame(rx_ring->netdev, xdpf,
+				   rx_ring->qid + rx_ring->adapter->num_io_queues);
+
+		xdp_stat = &rx_ring->rx_stats.xdp_tx;
+		break;
+	case XDP_REDIRECT:
+		if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
+			xdp_stat = &rx_ring->rx_stats.xdp_redirect;
+			break;
+		}
+		fallthrough;
+	case XDP_ABORTED:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
 		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
-	} else if (unlikely(verdict == XDP_DROP)) {
+		break;
+	case XDP_DROP:
 		xdp_stat = &rx_ring->rx_stats.xdp_drop;
-	} else if (unlikely(verdict == XDP_PASS)) {
+		break;
+	case XDP_PASS:
 		xdp_stat = &rx_ring->rx_stats.xdp_pass;
-	} else {
+		break;
+	default:
 		bpf_warn_invalid_xdp_action(verdict);
 		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
 	}
@@ -958,11 +965,20 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
 	return 0;
 }
 
+static void ena_unmap_rx_buff(struct ena_ring *rx_ring,
+			      struct ena_rx_buffer *rx_info)
+{
+	struct ena_com_buf *ena_buf = &rx_info->ena_buf;
+
+	dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
+		       ENA_PAGE_SIZE,
+		       DMA_BIDIRECTIONAL);
+}
+
 static void ena_free_rx_page(struct ena_ring *rx_ring,
 			     struct ena_rx_buffer *rx_info)
 {
 	struct page *page = rx_info->page;
-	struct ena_com_buf *ena_buf = &rx_info->ena_buf;
 
 	if (unlikely(!page)) {
 		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
@@ -970,9 +986,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
 		return;
 	}
 
-	dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
-		       ENA_PAGE_SIZE,
-		       DMA_BIDIRECTIONAL);
+	ena_unmap_rx_buff(rx_ring, rx_info);
 
 	__free_page(page);
 	rx_info->page = NULL;
@@ -1396,9 +1410,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 		return NULL;
 
 	do {
-		dma_unmap_page(rx_ring->dev,
-			       dma_unmap_addr(&rx_info->ena_buf, paddr),
-			       ENA_PAGE_SIZE, DMA_BIDIRECTIONAL);
+		ena_unmap_rx_buff(rx_ring, rx_info);
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
 				rx_info->page_offset, len, ENA_PAGE_SIZE);
@@ -1556,6 +1568,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 	struct sk_buff *skb;
 	int refill_required;
 	struct xdp_buff xdp;
+	int xdp_flags = 0;
 	int total_len = 0;
 	int xdp_verdict;
 	int rc = 0;
@@ -1603,22 +1616,25 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 					 &next_to_clean);
 
 		if (unlikely(!skb)) {
-			/* The page might not actually be freed here since the
-			 * page reference count is incremented in
-			 * ena_xdp_xmit_frame(), and it will be decreased only
-			 * when send completion was received from the device
-			 */
-			if (xdp_verdict == XDP_TX)
-				ena_free_rx_page(rx_ring,
-						 &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
 			for (i = 0; i < ena_rx_ctx.descs; i++) {
-				rx_ring->free_ids[next_to_clean] =
-					rx_ring->ena_bufs[i].req_id;
+				int req_id = rx_ring->ena_bufs[i].req_id;
+
+				rx_ring->free_ids[next_to_clean] = req_id;
 				next_to_clean =
 					ENA_RX_RING_IDX_NEXT(next_to_clean,
 							     rx_ring->ring_size);
+
+				/* Packets was passed for transmission, unmap it
+				 * from RX side.
+				 */
+				if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) {
+					ena_unmap_rx_buff(rx_ring,
+							  &rx_ring->rx_buffer_info[req_id]);
+					rx_ring->rx_buffer_info[req_id].page = NULL;
+				}
 			}
 			if (xdp_verdict != XDP_PASS) {
+				xdp_flags |= xdp_verdict;
 				res_budget--;
 				continue;
 			}
@@ -1664,6 +1680,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 		ena_refill_rx_bufs(rx_ring, refill_required);
 	}
 
+	if (xdp_flags & XDP_REDIRECT)
+		xdp_do_flush_map();
+
 	return work_done;
 
 error:
drivers/net/ethernet/amazon/ena/ena_netdev.h  +1 −0
@@ -239,6 +239,7 @@ struct ena_stats_rx {
 	u64 xdp_pass;
 	u64 xdp_tx;
 	u64 xdp_invalid;
+	u64 xdp_redirect;
 };
 
 struct ena_ring {