Commit e8223eef authored by Shay Agroskin, committed by Jakub Kicinski
Browse files

net: ena: use xdp_frame in XDP TX flow



Rename the ena_xdp_xmit_buff() function to ena_xdp_xmit_frame() and pass
it an xdp_frame struct instead of xdp_buff.
This change lays the ground for XDP redirect implementation which uses
xdp_frames when 'xmit'ing packets.

Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 89dd735e
Loading
Loading
Loading
Loading
+29 −24
Original line number Diff line number Diff line
@@ -233,9 +233,9 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
	return ret;
}

static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
				struct ena_tx_buffer *tx_info,
			       struct xdp_buff *xdp,
				struct xdp_frame *xdpf,
				void **push_hdr,
				u32 *push_len)
{
@@ -244,7 +244,7 @@ static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
	dma_addr_t dma = 0;
	u32 size;

	tx_info->xdpf = xdp_convert_buff_to_frame(xdp);
	tx_info->xdpf = xdpf;
	size = tx_info->xdpf->len;
	ena_buf = tx_info->bufs;

@@ -281,29 +281,31 @@ static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
	return -EINVAL;
}

static int ena_xdp_xmit_buff(struct net_device *dev,
			     struct xdp_buff *xdp,
			     int qid,
			     struct ena_rx_buffer *rx_info)
static int ena_xdp_xmit_frame(struct net_device *dev,
			      struct xdp_frame *xdpf,
			      int qid)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_com_tx_ctx ena_tx_ctx = {};
	struct ena_tx_buffer *tx_info;
	struct ena_ring *xdp_ring;
	struct page *rx_buff_page;
	u16 next_to_use, req_id;
	int rc;
	void *push_hdr;
	u32 push_len;

	rx_buff_page = virt_to_page(xdpf->data);

	xdp_ring = &adapter->tx_ring[qid];
	next_to_use = xdp_ring->next_to_use;
	req_id = xdp_ring->free_ids[next_to_use];
	tx_info = &xdp_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;
	page_ref_inc(rx_info->page);
	tx_info->xdp_rx_page = rx_info->page;
	page_ref_inc(rx_buff_page);
	tx_info->xdp_rx_page = rx_buff_page;

	rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
	if (unlikely(rc))
		goto error_drop_packet;

@@ -318,7 +320,7 @@ static int ena_xdp_xmit_buff(struct net_device *dev,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     xdp->data_end - xdp->data);
			     xdpf->len);
	if (rc)
		goto error_unmap_dma;
	/* trigger the dma engine. ena_com_write_sq_doorbell()
@@ -337,12 +339,11 @@ static int ena_xdp_xmit_buff(struct net_device *dev,
	return NETDEV_TX_OK;
}

static int ena_xdp_execute(struct ena_ring *rx_ring,
			   struct xdp_buff *xdp,
			   struct ena_rx_buffer *rx_info)
static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	struct bpf_prog *xdp_prog;
	u32 verdict = XDP_PASS;
	struct xdp_frame *xdpf;
	u64 *xdp_stat;

	rcu_read_lock();
@@ -354,12 +355,16 @@ static int ena_xdp_execute(struct ena_ring *rx_ring,
	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	if (verdict == XDP_TX) {
		ena_xdp_xmit_buff(rx_ring->netdev,
				  xdp,
				  rx_ring->qid + rx_ring->adapter->num_io_queues,
				  rx_info);
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
			xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		} else {
			ena_xdp_xmit_frame(rx_ring->netdev, xdpf,
					   rx_ring->qid + rx_ring->adapter->num_io_queues);

			xdp_stat = &rx_ring->rx_stats.xdp_tx;
		}
	} else if (unlikely(verdict == XDP_ABORTED)) {
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
@@ -1521,7 +1526,7 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
		return XDP_DROP;

	ret = ena_xdp_execute(rx_ring, xdp, rx_info);
	ret = ena_xdp_execute(rx_ring, xdp);

	/* The xdp program might expand the headers */
	if (ret == XDP_PASS) {
@@ -1600,7 +1605,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
		if (unlikely(!skb)) {
			/* The page might not actually be freed here since the
			 * page reference count is incremented in
			 * ena_xdp_xmit_buff(), and it will be decreased only
			 * ena_xdp_xmit_frame(), and it will be decreased only
			 * when send completion was received from the device
			 */
			if (xdp_verdict == XDP_TX)