Commit 8c78c1e5 authored by Lorenzo Bianconi, committed by Jakub Kicinski

igc: add xdp frags support to ndo_xdp_xmit



Add the capability to map non-linear xdp frames in XDP_TX and
ndo_xdp_xmit callback.
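
A non-linear xdp_frame keeps its extra buffers in the frame's shared_info
area, so the transmit path first budgets descriptors for the linear part
plus every frag and then walks all segments in a single loop. A minimal
sketch of that walk, using the same helpers the patch relies on
(sketch_map_xdp_frame is only an illustrative name; descriptor writes and
the DMA-error unwind are omitted):

/* Sketch only: walk the linear part and every frag of an xdp_frame,
 * mapping each segment for DMA, in the same way the new
 * igc_xdp_init_tx_descriptor() does.
 */
static int sketch_map_xdp_frame(struct igc_ring *ring, struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	u8 nr_frags = xdp_frame_has_frags(xdpf) ? sinfo->nr_frags : 0;
	void *data = xdpf->data;
	u32 len = xdpf->len;
	u16 count, i;

	/* budget one or more descriptors for the linear part and each frag */
	count = TXD_USE_COUNT(len);
	for (i = 0; i < nr_frags; i++)
		count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));

	if (count > igc_desc_unused(ring))
		return -EBUSY;

	i = 0;
	for (;;) {
		dma_addr_t dma = dma_map_single(ring->dev, data, len,
						DMA_TO_DEVICE);

		if (dma_mapping_error(ring->dev, dma))
			return -ENOMEM;	/* the real code unwinds prior mappings */

		/* ... fill one advanced TX descriptor with (dma, len) ... */

		if (i == nr_frags)
			break;

		data = skb_frag_address(&sinfo->frags[i]);
		len = skb_frag_size(&sinfo->frags[i]);
		i++;
	}

	return 0;
}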

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Tested-by: Naama Meir <naamax.meir@linux.intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Link: https://lore.kernel.org/r/20220817173628.109102-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent bafe1adb
+83 −45
@@ -2129,65 +2129,102 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
 	return ok;
 }
 
-static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
-				  struct xdp_frame *xdpf,
-				  struct igc_ring *ring)
+/* This function requires __netif_tx_lock is held by the caller. */
+static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
+				      struct xdp_frame *xdpf)
 {
-	dma_addr_t dma;
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+	u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+	u16 count, index = ring->next_to_use;
+	struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
+	struct igc_tx_buffer *buffer = head;
+	union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
+	u32 olinfo_status, len = xdpf->len, cmd_type;
+	void *data = xdpf->data;
+	u16 i;
+
+	count = TXD_USE_COUNT(len);
+	for (i = 0; i < nr_frags; i++)
+		count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
+
+	if (igc_maybe_stop_tx(ring, count + 3)) {
+		/* this is a hard error */
+		return -EBUSY;
+	}
+
+	i = 0;
+	head->bytecount = xdp_get_frame_len(xdpf);
+	head->type = IGC_TX_BUFFER_TYPE_XDP;
+	head->gso_segs = 1;
+	head->xdpf = xdpf;
+
+	olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+
+	for (;;) {
+		dma_addr_t dma;
 
-	dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
-	if (dma_mapping_error(ring->dev, dma)) {
-		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
-		return -ENOMEM;
-	}
+		dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
+		if (dma_mapping_error(ring->dev, dma)) {
+			netdev_err_once(ring->netdev,
+					"Failed to map DMA for TX\n");
+			goto unmap;
+		}
 
-	buffer->type = IGC_TX_BUFFER_TYPE_XDP;
-	buffer->xdpf = xdpf;
-	buffer->protocol = 0;
-	buffer->bytecount = xdpf->len;
-	buffer->gso_segs = 1;
-	buffer->time_stamp = jiffies;
-	dma_unmap_len_set(buffer, len, xdpf->len);
-	dma_unmap_addr_set(buffer, dma, dma);
-	return 0;
-}
+		dma_unmap_len_set(buffer, len, len);
+		dma_unmap_addr_set(buffer, dma, dma);
 
-/* This function requires __netif_tx_lock is held by the caller. */
-static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
-				      struct xdp_frame *xdpf)
-{
-	struct igc_tx_buffer *buffer;
-	union igc_adv_tx_desc *desc;
-	u32 cmd_type, olinfo_status;
-	int err;
+		cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+			   IGC_ADVTXD_DCMD_IFCS | len;
 
-	if (!igc_desc_unused(ring))
-		return -EBUSY;
+		desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+		desc->read.buffer_addr = cpu_to_le64(dma);
 
-	buffer = &ring->tx_buffer_info[ring->next_to_use];
-	err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
-	if (err)
-		return err;
+		buffer->protocol = 0;
 
-	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
-		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
-		   buffer->bytecount;
-	olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+		if (++index == ring->count)
+			index = 0;
 
-	desc = IGC_TX_DESC(ring, ring->next_to_use);
-	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
-	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
-	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
+		if (i == nr_frags)
+			break;
 
-	netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
+		buffer = &ring->tx_buffer_info[index];
+		desc = IGC_TX_DESC(ring, index);
+		desc->read.olinfo_status = 0;
 
-	buffer->next_to_watch = desc;
+		data = skb_frag_address(&sinfo->frags[i]);
+		len = skb_frag_size(&sinfo->frags[i]);
+		i++;
+	}
+	desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);
 
-	ring->next_to_use++;
-	if (ring->next_to_use == ring->count)
-		ring->next_to_use = 0;
+	netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
+	/* set the timestamp */
+	head->time_stamp = jiffies;
+	/* set next_to_watch value indicating a packet is present */
+	head->next_to_watch = desc;
+	ring->next_to_use = index;
 
 	return 0;
+
+unmap:
+	for (;;) {
+		buffer = &ring->tx_buffer_info[index];
+		if (dma_unmap_len(buffer, len))
+			dma_unmap_page(ring->dev,
+				       dma_unmap_addr(buffer, dma),
+				       dma_unmap_len(buffer, len),
+				       DMA_TO_DEVICE);
+		dma_unmap_len_set(buffer, len, 0);
+		if (buffer == head)
+			break;
+
+		if (!index)
+			index += ring->count;
+		index--;
+	}
+
+	return -ENOMEM;
 }
 
 static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
@@ -2369,6 +2406,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 			xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
 					 igc_rx_offset(rx_ring) + pkt_offset,
 					 size, true);
+			xdp_buff_clear_frags_flag(&xdp);
 
 			skb = igc_xdp_run_prog(adapter, &xdp);
 		}
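
If dma_map_single() fails partway through a multi-segment frame, the
unmap: path in the new igc_xdp_init_tx_descriptor() walks the ring
backwards from the failing slot to the head buffer, wrapping at index 0,
and releases every mapping recorded so far. A small self-contained model
of that backward walk, with hypothetical model_* names and DMA unmapping
reduced to clearing the recorded length:

#include <assert.h>
#include <stddef.h>

/* Hypothetical, simplified stand-ins for the driver's ring bookkeeping. */
struct model_tx_buffer {
	size_t map_len;			/* 0 means no DMA mapping recorded */
};

struct model_tx_ring {
	struct model_tx_buffer *buffer_info;	/* array of "count" slots */
	unsigned int count;
};

/* Walk backwards from the slot that failed (index) to the head slot,
 * wrapping at the start of the ring, and release every recorded mapping.
 * The driver's unmap: loop does the same, calling dma_unmap_page() where
 * this model simply clears map_len.
 */
static void model_unwind(struct model_tx_ring *ring,
			 struct model_tx_buffer *head, unsigned int index)
{
	for (;;) {
		struct model_tx_buffer *buffer = &ring->buffer_info[index];

		if (buffer->map_len)
			buffer->map_len = 0;

		if (buffer == head)
			break;

		/* step one slot back, wrapping slot 0 around to count - 1 */
		if (!index)
			index += ring->count;
		index--;
	}
}

int main(void)
{
	struct model_tx_buffer slots[4] = { { 64 }, { 128 }, { 256 }, { 32 } };
	struct model_tx_ring ring = { .buffer_info = slots, .count = 4 };

	/* head at slot 3; the mapping for slot 1 (after wraparound) failed */
	model_unwind(&ring, &slots[3], 1);

	assert(slots[3].map_len == 0 && slots[0].map_len == 0 &&
	       slots[1].map_len == 0);
	assert(slots[2].map_len == 256);	/* never part of this frame */
	return 0;
}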