Commit 22174f79 authored by Heng Qi, committed by David S. Miller

virtio-net: construct multi-buffer xdp in mergeable



Build multi-buffer xdp using virtnet_build_xdp_buff_mrg().

For the buffers prefilled before XDP is set, we will probably use
vq reset in the future. At the same time, virtio-net currently uses
compound pages, so the tailroom that bpf_xdp_frags_increase_tail()
calculates for the last frag would have to account for the frag's
offset within its page and can come out negative; we therefore
disable tail increase by not setting xdp_rxq->frag_size.
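
For reference, the check this relies on is the frag_size guard in
bpf_xdp_frags_increase_tail(); below is a condensed sketch of that
helper as it appears in net/core/filter.c in kernels of this era
(context only, not part of this patch):

static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags - 1];
	struct xdp_rxq_info *rxq = xdp->rxq;
	unsigned int tailroom;

	/* A frag_size the driver never set reads as 0 and disables growth. */
	if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz)
		return -EOPNOTSUPP;

	tailroom = rxq->frag_size - skb_frag_size(frag) - skb_frag_off(frag);
	if (unlikely(offset > tailroom))
		return -EINVAL;

	memset(skb_frag_address(frag) + skb_frag_size(frag), 0, offset);
	skb_frag_size_add(frag, offset);
	sinfo->xdp_frags_size += offset;

	return 0;
}

With frag_size left at zero, bpf_xdp_adjust_tail() cannot grow into
the frags, so the negative-tailroom computation is never reached.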

Signed-off-by: Heng Qi <hengqi@linux.alibaba.com>
Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ef75cb51
drivers/net/virtio_net.c: +44 −14
@@ -1043,7 +1043,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	unsigned int metasize = 0;
 	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
 	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
-	unsigned int frame_sz;
+	unsigned int frame_sz, xdp_room;
 	int err;
 
 	head_skb = NULL;
@@ -1064,11 +1064,14 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(rq->xdp_prog);
 	if (xdp_prog) {
+		unsigned int xdp_frags_truesz = 0;
+		struct skb_shared_info *shinfo;
 		struct xdp_frame *xdpf;
 		struct page *xdp_page;
 		struct xdp_buff xdp;
 		void *data;
 		u32 act;
+		int i;
 
 		/* Transient failure which in theory could occur if
 		 * in-flight packets from before XDP was enabled reach
@@ -1084,14 +1087,16 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		 */
 		frame_sz = truesize;
 
-		/* This happens when rx buffer size is underestimated
-		 * or headroom is not enough because of the buffer
-		 * was refilled before XDP is set. This should only
-		 * happen for the first several packets, so we don't
-		 * care much about its performance.
+		/* This happens when headroom is not enough because
+		 * of the buffer was prefilled before XDP is set.
+		 * This should only happen for the first several packets.
+		 * In fact, vq reset can be used here to help us clean up
+		 * the prefilled buffers, but many existing devices do not
+		 * support it, and we don't want to bother users who are
+		 * using xdp normally.
 		 */
-		if (unlikely(num_buf > 1 ||
-			     headroom < virtnet_get_headroom(vi))) {
+		if (!xdp_prog->aux->xdp_has_frags &&
+		    (num_buf > 1 || headroom < virtnet_get_headroom(vi))) {
 			/* linearize data for XDP */
 			xdp_page = xdp_linearize_page(rq, &num_buf,
 						      page, offset,
@@ -1102,17 +1107,29 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 			if (!xdp_page)
 				goto err_xdp;
 			offset = VIRTIO_XDP_HEADROOM;
+		} else if (unlikely(headroom < virtnet_get_headroom(vi))) {
+			xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
+						  sizeof(struct skb_shared_info));
+			if (len + xdp_room > PAGE_SIZE)
+				goto err_xdp;
+
+			xdp_page = alloc_page(GFP_ATOMIC);
+			if (!xdp_page)
+				goto err_xdp;
+
+			memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
+			       page_address(page) + offset, len);
+			frame_sz = PAGE_SIZE;
+			offset = VIRTIO_XDP_HEADROOM;
 		} else {
 			xdp_page = page;
 		}
 
-		/* Allow consuming headroom but reserve enough space to push
-		 * the descriptor on if we get an XDP_TX return code.
-		 */
 		data = page_address(xdp_page) + offset;
-		xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq);
-		xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len,
-				 VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true);
+		err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
+						 &num_buf, &xdp_frags_truesz, stats);
+		if (unlikely(err))
+			goto err_xdp_frags;
 
 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 		stats->xdp_packets++;
@@ -1208,6 +1225,19 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 				__free_pages(xdp_page, 0);
 			goto err_xdp;
 		}
+err_xdp_frags:
+		if (unlikely(xdp_page != page))
+			__free_pages(xdp_page, 0);
+
+		if (xdp_buff_has_frags(&xdp)) {
+			shinfo = xdp_get_shared_info_from_buff(&xdp);
+			for (i = 0; i < shinfo->nr_frags; i++) {
+				xdp_page = skb_frag_page(&shinfo->frags[i]);
+				put_page(xdp_page);
+			}
+		}
+
+		goto err_xdp;
 	}
 	rcu_read_unlock();
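
virtnet_build_xdp_buff_mrg() itself was added earlier in this series
(see parent ef75cb51). For context, here is an illustrative sketch of
the generic pattern for assembling a multi-buffer xdp_buff with the
core XDP helpers; example_build_mb_xdp() and its parameters are made
up, only the xdp_* and skb_frag_* helpers are real kernel API of this
era:

/* Illustrative sketch only -- not code from this patch. */
#include <linux/skbuff.h>
#include <net/xdp.h>

static int example_build_mb_xdp(struct xdp_buff *xdp, struct xdp_rxq_info *rxq,
				void *hard_start, u32 headroom, u32 data_len,
				u32 frame_sz, struct page *frag_page,
				u32 frag_off, u32 frag_len)
{
	struct skb_shared_info *shinfo;
	skb_frag_t *frag;

	/* Head buffer: same setup as single-buffer XDP. */
	xdp_init_buff(xdp, frame_sz, rxq);
	xdp_prepare_buff(xdp, hard_start, headroom, data_len, true);

	/* Flag the buff as fragmented before touching shinfo. */
	if (!xdp_buff_has_frags(xdp)) {
		xdp_buff_set_frags_flag(xdp);
		shinfo = xdp_get_shared_info_from_buff(xdp);
		shinfo->nr_frags = 0;
		shinfo->xdp_frags_size = 0;
	}

	shinfo = xdp_get_shared_info_from_buff(xdp);
	if (shinfo->nr_frags == MAX_SKB_FRAGS)
		return -ENOMEM;

	/* Append one tail fragment; each frag holds its own page
	 * reference, which is what the err_xdp_frags path in the diff
	 * above releases with put_page() on error.
	 */
	frag = &shinfo->frags[shinfo->nr_frags++];
	__skb_frag_set_page(frag, frag_page);
	skb_frag_off_set(frag, frag_off);
	skb_frag_size_set(frag, frag_len);
	shinfo->xdp_frags_size += frag_len;

	return 0;
}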