Commit c5f3e72f authored by Xuan Zhuo, committed by Jakub Kicinski
Browse files

virtio_net: introduce receive_small_xdp()



The purpose of this patch is to simplify receive_small().
Separate all the XDP logic for small buffers into its own function.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 59ba3b1a
Loading
Loading
Loading
Loading
+100 −65
Original line number Diff line number Diff line
@@ -931,52 +931,35 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
	return NULL;
}

static struct sk_buff *receive_small(struct net_device *dev,
static struct sk_buff *receive_small_xdp(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
				     void *buf, void *ctx,
					 struct bpf_prog *xdp_prog,
					 void *buf,
					 unsigned int xdp_headroom,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 struct virtnet_rq_stats *stats)
{
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	unsigned int xdp_headroom = (unsigned long)ctx;
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
	struct page *page = virt_to_head_page(buf);
	unsigned int delta = 0;
	struct page *xdp_page;
	unsigned int metasize = 0;

	len -= vi->hdr_len;
	stats->bytes += len;

	if (unlikely(len > GOOD_PACKET_LEN)) {
		pr_debug("%s: rx error: len %u exceeds max size %d\n",
			 dev->name, len, GOOD_PACKET_LEN);
		dev->stats.rx_length_errors++;
		goto err;
	}

	if (likely(!vi->xdp_enabled)) {
		xdp_prog = NULL;
		goto skip_xdp;
	}

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
	unsigned int buflen;
	struct xdp_buff xdp;
	struct sk_buff *skb;
	unsigned int delta = 0;
	unsigned int metasize = 0;
	void *orig_data;
	u32 act;

	if (unlikely(hdr->hdr.gso_type))
		goto err_xdp;

	buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
		int offset = buf - page_address(page) + header_offset;
		unsigned int tlen = len + vi->hdr_len;
@@ -1012,34 +995,27 @@ static struct sk_buff *receive_small(struct net_device *dev,
		len = xdp.data_end - xdp.data;
		metasize = xdp.data - xdp.data_meta;
		break;

	case XDP_TX:
	case XDP_REDIRECT:
			rcu_read_unlock();
		goto xdp_xmit;

	default:
		goto err_xdp;
	}
	}
	rcu_read_unlock();

skip_xdp:
	skb = build_skb(buf, buflen);
	if (!skb)
		goto err;

	skb_reserve(skb, headroom - delta);
	skb_put(skb, len);
	if (!xdp_prog) {
		buf += header_offset;
		memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
	} /* keep zeroed vnet hdr since XDP is loaded */

	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;

err_xdp:
	rcu_read_unlock();
	stats->xdp_drops++;
err:
	stats->drops++;
@@ -1048,6 +1024,65 @@ static struct sk_buff *receive_small(struct net_device *dev,
	return NULL;
}

/* Receive path for "small" (single-buffer, non-mergeable) packets.
 * Strips the virtio-net header from @len, dispatches to the XDP helper
 * when a program is attached, and otherwise wraps @buf in an skb.
 * Returns the skb on success, or NULL (dropping the buffer) on error.
 */
static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     unsigned int *xdp_xmit,
				     struct virtnet_rq_stats *stats)
{
	/* ctx smuggles the headroom reserved for XDP at refill time. */
	unsigned int headroom_xdp = (unsigned long)ctx;
	unsigned int hdr_off = VIRTNET_RX_PAD + headroom_xdp;
	unsigned int reserve = vi->hdr_len + hdr_off;
	unsigned int frag_size = SKB_DATA_ALIGN(GOOD_PACKET_LEN + reserve) +
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *head_page = virt_to_head_page(buf);
	struct sk_buff *skb;

	len -= vi->hdr_len;
	stats->bytes += len;

	if (unlikely(len > GOOD_PACKET_LEN)) {
		pr_debug("%s: rx error: len %u exceeds max size %d\n",
			 dev->name, len, GOOD_PACKET_LEN);
		dev->stats.rx_length_errors++;
		goto err;
	}

	/* XDP fast-path check: only take the RCU read lock when a program
	 * may be attached; the helper consumes the buffer either way.
	 */
	if (unlikely(vi->xdp_enabled)) {
		struct bpf_prog *xdp_prog;

		rcu_read_lock();
		xdp_prog = rcu_dereference(rq->xdp_prog);
		if (xdp_prog) {
			skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
						headroom_xdp, len, xdp_xmit,
						stats);
			rcu_read_unlock();
			return skb;
		}
		rcu_read_unlock();
	}

	/* No XDP: hand the page fragment straight to an skb. */
	skb = build_skb(buf, frag_size);
	if (!skb)
		goto err;

	skb_reserve(skb, reserve);
	skb_put(skb, len);

	/* Copy the device-written virtio-net header into the skb. */
	memcpy(skb_vnet_hdr(skb), buf + hdr_off, vi->hdr_len);
	return skb;

err:
	stats->drops++;
	put_page(head_page);
	return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,