Commit 71dc9ec9 authored by Bobby Eshleman, committed by David S. Miller

virtio/vsock: replace virtio_vsock_pkt with sk_buff



This commit changes virtio/vsock to use sk_buff instead of
virtio_vsock_pkt. Beyond conforming more closely to the rest of the net
code, using sk_buff lets vsock adopt sk_buff-dependent features in the
future (such as sockmap) and improves throughput.

This patch introduces the following performance changes:

Tool: Uperf
Env: Phys Host + L1 Guest
Payload: 64k
Threads: 16
Test Runs: 10
Type: SOCK_STREAM
Before: commit b7bfaa76 ("Linux 6.2-rc3")

Before
------
g2h: 16.77Gb/s
h2g: 10.56Gb/s

After
-----
g2h: 21.04Gb/s
h2g: 10.76Gb/s
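
As an illustrative sketch (not part of the patch itself, but built from
the helpers it adds to include/linux/virtio_vsock.h): per-packet
metadata that previously lived in struct virtio_vsock_pkt now rides in
skb->cb, the virtio header occupies reserved headroom, and pending
packets sit on an sk_buff_head. The function name below is hypothetical.

/* Sketch only: build one packet with the new helpers and queue it. */
static int example_queue_pkt(struct sk_buff_head *queue,
			     const void *payload, u32 len, bool reply)
{
	struct virtio_vsock_hdr *hdr;
	struct sk_buff *skb;

	/* Size includes the headroom reserved for the virtio header. */
	skb = virtio_vsock_alloc_skb(VIRTIO_VSOCK_SKB_HEADROOM + len,
				     GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = virtio_vsock_hdr(skb);	/* header sits at skb->head */
	hdr->len = cpu_to_le32(len);

	memcpy(skb_put(skb, len), payload, len);

	if (reply)
		virtio_vsock_skb_set_reply(skb);  /* flag kept in skb->cb */

	virtio_vsock_skb_queue_tail(queue, skb);  /* locked tail insert */
	return 0;
}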

Signed-off-by: Bobby Eshleman <bobby.eshleman@bytedance.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5ef2702a
drivers/vhost/vsock.c +89 −125
@@ -51,8 +51,7 @@ struct vhost_vsock {
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */
	struct sk_buff_head send_pkt_queue; /* host->guest pending packets */

	atomic_t queued_replies;

@@ -108,40 +107,31 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
	vhost_disable_notify(&vsock->dev, vq);

	do {
		struct virtio_vsock_pkt *pkt;
		struct virtio_vsock_hdr *hdr;
		size_t iov_len, payload_len;
		struct iov_iter iov_iter;
		u32 flags_to_restore = 0;
		struct sk_buff *skb;
		unsigned out, in;
		size_t nbytes;
		size_t iov_len, payload_len;
		int head;
		u32 flags_to_restore = 0;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);

		if (!skb) {
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
@@ -153,26 +143,27 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			kfree_skb(skb);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		iov_len = iov_length(&vq->iov[out], in);
		if (iov_len < sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
		if (iov_len < sizeof(*hdr)) {
			kfree_skb(skb);
			vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
			break;
		}

		iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
		payload_len = pkt->len - pkt->off;
		payload_len = skb->len;
		hdr = virtio_vsock_hdr(skb);

		/* If the packet is greater than the space available in the
		 * buffer, we split it using multiple buffers.
		 */
		if (payload_len > iov_len - sizeof(pkt->hdr)) {
			payload_len = iov_len - sizeof(pkt->hdr);
		if (payload_len > iov_len - sizeof(*hdr)) {
			payload_len = iov_len - sizeof(*hdr);

			/* As we are copying pieces of large packet's buffer to
			 * small rx buffers, headers of packets in rx queue are
@@ -185,31 +176,30 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			 * bits set. After initialized header will be copied to
			 * rx buffer, these required bits will be restored.
			 */
			if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
				pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
			if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
				hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
				flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;

				if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
					pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
				if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR) {
					hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
					flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
				}
			}
		}

		/* Set the correct length in the header */
		pkt->hdr.len = cpu_to_le32(payload_len);
		hdr->len = cpu_to_le32(payload_len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
		nbytes = copy_to_iter(hdr, sizeof(*hdr), &iov_iter);
		if (nbytes != sizeof(*hdr)) {
			kfree_skb(skb);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
				      &iov_iter);
		nbytes = copy_to_iter(skb->data, payload_len, &iov_iter);
		if (nbytes != payload_len) {
			virtio_transport_free_pkt(pkt);
			kfree_skb(skb);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}
@@ -217,31 +207,28 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
		/* Deliver to monitoring devices all packets that we
		 * will transmit.
		 */
		virtio_transport_deliver_tap_pkt(pkt);
		virtio_transport_deliver_tap_pkt(skb);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
		vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
		added = true;

		pkt->off += payload_len;
		skb_pull(skb, payload_len);
		total_len += payload_len;

		/* If we didn't send all the payload we can requeue the packet
		 * to send it with the next available buffer.
		 */
		if (pkt->off < pkt->len) {
			pkt->hdr.flags |= cpu_to_le32(flags_to_restore);
		if (skb->len > 0) {
			hdr->flags |= cpu_to_le32(flags_to_restore);

			/* We are queueing the same virtio_vsock_pkt to handle
			/* We are queueing the same skb to handle
			 * the remaining bytes, and we want to deliver it
			 * to monitoring devices in the next iteration.
			 */
			pkt->tap_delivered = false;

			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			virtio_vsock_skb_clear_tap_delivered(skb);
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
		} else {
			if (pkt->reply) {
			if (virtio_vsock_skb_reply(skb)) {
				int val;

				val = atomic_dec_return(&vsock->queued_replies);
@@ -253,7 +240,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
					restart_tx = true;
			}

			virtio_transport_free_pkt(pkt);
			consume_skb(skb);
		}
	} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
	if (added)
@@ -278,28 +265,26 @@ static void vhost_transport_send_pkt_work(struct vhost_work *work)
}

static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
vhost_transport_send_pkt(struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
	struct vhost_vsock *vsock;
	int len = pkt->len;
	int len = skb->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id  */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		virtio_transport_free_pkt(pkt);
		kfree_skb(skb);
		return -ENODEV;
	}

	if (pkt->reply)
	if (virtio_vsock_skb_reply(skb))
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	rcu_read_unlock();
@@ -310,10 +295,8 @@ static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	int ret = -ENODEV;
	LIST_HEAD(freeme);

	rcu_read_lock();

@@ -322,20 +305,7 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
	if (!vsock)
		goto out;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
@@ -352,12 +322,14 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
	return ret;
}

static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
static struct sk_buff *
vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct virtio_vsock_hdr *hdr;
	struct iov_iter iov_iter;
	struct sk_buff *skb;
	size_t payload_len;
	size_t nbytes;
	size_t len;

@@ -366,50 +338,48 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
	len = iov_length(vq->iov, out);

	/* len contains both payload and hdr */
	skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, ITER_SOURCE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
	hdr = virtio_vsock_hdr(skb);
	nbytes = copy_from_iter(hdr, sizeof(*hdr), &iov_iter);
	if (nbytes != sizeof(*hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		       sizeof(*hdr), nbytes);
		kfree_skb(skb);
		return NULL;
	}

	pkt->len = le32_to_cpu(pkt->hdr.len);
	payload_len = le32_to_cpu(hdr->len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}
	if (!payload_len)
		return skb;

	pkt->buf = kvmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
	/* The pkt is too big or the length in the header is invalid */
	if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
	    payload_len + sizeof(*hdr) > len) {
		kfree_skb(skb);
		return NULL;
	}

	pkt->buf_len = pkt->len;
	virtio_vsock_skb_rx_put(skb);

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
	nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
	if (nbytes != payload_len) {
		vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
		       payload_len, nbytes);
		kfree_skb(skb);
		return NULL;
	}

	return pkt;
	return skb;
}

/* Is there space left for replies to rx packets? */
@@ -496,9 +466,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head, pkts = 0, total_len = 0;
	unsigned int out, in;
	struct sk_buff *skb;
	bool added = false;

	mutex_lock(&vq->mutex);
@@ -511,6 +481,8 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)

	vhost_disable_notify(&vsock->dev, vq);
	do {
		struct virtio_vsock_hdr *hdr;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
@@ -532,24 +504,26 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
		skb = vhost_vsock_alloc_skb(vq, out, in);
		if (!skb) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		total_len += sizeof(pkt->hdr) + pkt->len;
		total_len += sizeof(*hdr) + skb->len;

		/* Deliver to monitoring devices all received packets */
		virtio_transport_deliver_tap_pkt(pkt);
		virtio_transport_deliver_tap_pkt(skb);

		hdr = virtio_vsock_hdr(skb);

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
		    le64_to_cpu(pkt->hdr.dst_cid) ==
		if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
		    le64_to_cpu(hdr->dst_cid) ==
		    vhost_transport_get_local_cid())
			virtio_transport_recv_pkt(&vhost_transport, pkt);
			virtio_transport_recv_pkt(&vhost_transport, skb);
		else
			virtio_transport_free_pkt(pkt);
			kfree_skb(skb);

		vhost_add_used(vq, head, 0);
		added = true;
@@ -693,8 +667,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
		       VHOST_VSOCK_WEIGHT, true, NULL);

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	skb_queue_head_init(&vsock->send_pkt_queue);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

@@ -760,16 +733,7 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);
	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

	vhost_dev_cleanup(&vsock->dev);
	kfree(vsock->dev.vqs);
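
The subtlest part of the vhost conversion above is the partial-send
path: the old pkt->off cursor is replaced by skb_pull(), which consumes
the bytes already copied so the same skb can be requeued until skb->len
reaches zero. Condensed for illustration from
vhost_transport_do_send_pkt() above:

	skb_pull(skb, payload_len);	/* consume the bytes just copied */

	if (skb->len > 0) {
		/* Payload remains: restore the EOM/EOR flags stripped for
		 * the split, mark the skb undelivered to tap devices, and
		 * requeue it head-of-line to preserve ordering.
		 */
		hdr->flags |= cpu_to_le32(flags_to_restore);
		virtio_vsock_skb_clear_tap_delivered(skb);
		virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
	} else {
		consume_skb(skb);	/* fully transmitted */
	}
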
include/linux/virtio_vsock.h +109 −20
@@ -7,6 +7,109 @@
#include <net/sock.h>
#include <net/af_vsock.h>

#define VIRTIO_VSOCK_SKB_HEADROOM (sizeof(struct virtio_vsock_hdr))

struct virtio_vsock_skb_cb {
	bool reply;
	bool tap_delivered;
};

#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))

static inline struct virtio_vsock_hdr *virtio_vsock_hdr(struct sk_buff *skb)
{
	return (struct virtio_vsock_hdr *)skb->head;
}

static inline bool virtio_vsock_skb_reply(struct sk_buff *skb)
{
	return VIRTIO_VSOCK_SKB_CB(skb)->reply;
}

static inline void virtio_vsock_skb_set_reply(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->reply = true;
}

static inline bool virtio_vsock_skb_tap_delivered(struct sk_buff *skb)
{
	return VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered;
}

static inline void virtio_vsock_skb_set_tap_delivered(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = true;
}

static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
{
	VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false;
}

static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
{
	u32 len;

	len = le32_to_cpu(virtio_vsock_hdr(skb)->len);

	if (len > 0)
		skb_put(skb, len);
}

static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
{
	struct sk_buff *skb;

	if (size < VIRTIO_VSOCK_SKB_HEADROOM)
		return NULL;

	skb = alloc_skb(size, mask);
	if (!skb)
		return NULL;

	skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
	return skb;
}

static inline void
virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
{
	spin_lock_bh(&list->lock);
	__skb_queue_head(list, skb);
	spin_unlock_bh(&list->lock);
}

static inline void
virtio_vsock_skb_queue_tail(struct sk_buff_head *list, struct sk_buff *skb)
{
	spin_lock_bh(&list->lock);
	__skb_queue_tail(list, skb);
	spin_unlock_bh(&list->lock);
}

static inline struct sk_buff *virtio_vsock_skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	spin_lock_bh(&list->lock);
	skb = __skb_dequeue(list);
	spin_unlock_bh(&list->lock);

	return skb;
}

static inline void virtio_vsock_skb_queue_purge(struct sk_buff_head *list)
{
	spin_lock_bh(&list->lock);
	__skb_queue_purge(list);
	spin_unlock_bh(&list->lock);
}

static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
{
	return (size_t)(skb_end_pointer(skb) - skb->head);
}

#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE	(1024 * 4)
#define VIRTIO_VSOCK_MAX_BUF_SIZE		0xFFFFFFFFUL
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE		(1024 * 64)
@@ -35,23 +138,10 @@ struct virtio_vsock_sock {
	u32 last_fwd_cnt;
	u32 rx_bytes;
	u32 buf_alloc;
	struct list_head rx_queue;
	struct sk_buff_head rx_queue;
	u32 msg_count;
};

struct virtio_vsock_pkt {
	struct virtio_vsock_hdr	hdr;
	struct list_head list;
	/* socket refcnt not held, only use for cancellation */
	struct vsock_sock *vsk;
	void *buf;
	u32 buf_len;
	u32 len;
	u32 off;
	bool reply;
	bool tap_delivered;
};

struct virtio_vsock_pkt_info {
	u32 remote_cid, remote_port;
	struct vsock_sock *vsk;
@@ -68,7 +158,7 @@ struct virtio_transport {
	struct vsock_transport transport;

	/* Takes ownership of the packet */
	int (*send_pkt)(struct virtio_vsock_pkt *pkt);
	int (*send_pkt)(struct sk_buff *skb);
};

ssize_t
@@ -149,11 +239,10 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
void virtio_transport_destruct(struct vsock_sock *vsk);

void virtio_transport_recv_pkt(struct virtio_transport *t,
			       struct virtio_vsock_pkt *pkt);
void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
			       struct sk_buff *skb);
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt);

void virtio_transport_deliver_tap_pkt(struct sk_buff *skb);
int virtio_transport_purge_skbs(void *vsk, struct sk_buff_head *list);
#endif /* _LINUX_VIRTIO_VSOCK_H */
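
The queue helpers above wrap the lockless __skb_queue_*() primitives in
spin_lock_bh() on the sk_buff_head's built-in lock, which is what lets
the transports drop their separate send_pkt_list_lock. A minimal usage
sketch (illustrative only; skb_to_send stands in for a previously built
packet):

	struct sk_buff_head queue;
	struct sk_buff *skb;

	skb_queue_head_init(&queue);

	/* Producer side (process or BH context): */
	virtio_vsock_skb_queue_tail(&queue, skb_to_send);

	/* Consumer side: */
	while ((skb = virtio_vsock_skb_dequeue(&queue)))
		consume_skb(skb);

	/* Teardown: drop anything still pending. */
	virtio_vsock_skb_queue_purge(&queue);
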
net/vmw_vsock/virtio_transport.c +52 −97
@@ -42,8 +42,7 @@ struct virtio_vsock {
	bool tx_run;

	struct work_struct send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;
	struct sk_buff_head send_pkt_queue;

	atomic_t queued_replies;

@@ -101,41 +100,31 @@ virtio_transport_send_pkt_work(struct work_struct *work)
	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct scatterlist hdr, buf, *sgs[2];
		int ret, in_sg = 0, out_sg = 0;
		struct sk_buff *skb;
		bool reply;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
		if (!skb)
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		virtio_transport_deliver_tap_pkt(pkt);

		reply = pkt->reply;
		virtio_transport_deliver_tap_pkt(skb);
		reply = virtio_vsock_skb_reply(skb);

		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
		sg_init_one(&hdr, virtio_vsock_hdr(skb), sizeof(*virtio_vsock_hdr(skb)));
		sgs[out_sg++] = &hdr;
		if (pkt->buf) {
			sg_init_one(&buf, pkt->buf, pkt->len);
		if (skb->len > 0) {
			sg_init_one(&buf, skb->data, skb->len);
			sgs[out_sg++] = &buf;
		}

		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
		/* Usually this means that there is no more space available in
		 * the vq
		 */
		if (ret < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			break;
		}

@@ -164,32 +153,32 @@ virtio_transport_send_pkt_work(struct work_struct *work)
}

static int
virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
virtio_transport_send_pkt(struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr;
	struct virtio_vsock *vsock;
	int len = pkt->len;
	int len = skb->len;

	hdr = virtio_vsock_hdr(skb);

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
		virtio_transport_free_pkt(pkt);
	if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (pkt->reply)
	if (virtio_vsock_skb_reply(skb))
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);

out_rcu:
@@ -201,9 +190,7 @@ static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct virtio_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0, ret;
	LIST_HEAD(freeme);

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
@@ -212,20 +199,7 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)
		goto out_rcu;
	}

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

	if (cnt) {
		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
@@ -246,38 +220,28 @@ virtio_transport_cancel_pkt(struct vsock_sock *vsk)

static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
	struct virtio_vsock_pkt *pkt;
	struct scatterlist hdr, buf, *sgs[2];
	int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
	struct scatterlist pkt, *p;
	struct virtqueue *vq;
	struct sk_buff *skb;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
		if (!pkt)
		skb = virtio_vsock_alloc_skb(total_len, GFP_KERNEL);
		if (!skb)
			break;

		pkt->buf = kmalloc(buf_len, GFP_KERNEL);
		if (!pkt->buf) {
			virtio_transport_free_pkt(pkt);
		memset(skb->head, 0, VIRTIO_VSOCK_SKB_HEADROOM);
		sg_init_one(&pkt, virtio_vsock_hdr(skb), total_len);
		p = &pkt;
		ret = virtqueue_add_sgs(vq, &p, 0, 1, skb, GFP_KERNEL);
		if (ret < 0) {
			kfree_skb(skb);
			break;
		}

		pkt->buf_len = buf_len;
		pkt->len = buf_len;

		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
		sgs[0] = &hdr;

		sg_init_one(&buf, pkt->buf, buf_len);
		sgs[1] = &buf;
		ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
		if (ret) {
			virtio_transport_free_pkt(pkt);
			break;
		}
		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
@@ -299,12 +263,12 @@ static void virtio_transport_tx_work(struct work_struct *work)
		goto out;

	do {
		struct virtio_vsock_pkt *pkt;
		struct sk_buff *skb;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
			virtio_transport_free_pkt(pkt);
		while ((skb = virtqueue_get_buf(vq, &len)) != NULL) {
			consume_skb(skb);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));
@@ -529,7 +493,7 @@ static void virtio_transport_rx_work(struct work_struct *work)
	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			struct virtio_vsock_pkt *pkt;
			struct sk_buff *skb;
			unsigned int len;

			if (!virtio_transport_more_replies(vsock)) {
@@ -540,23 +504,22 @@ static void virtio_transport_rx_work(struct work_struct *work)
				goto out;
			}

			pkt = virtqueue_get_buf(vq, &len);
			if (!pkt) {
			skb = virtqueue_get_buf(vq, &len);
			if (!skb)
				break;
			}

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(pkt->hdr) ||
				     len > sizeof(pkt->hdr) + pkt->len)) {
				virtio_transport_free_pkt(pkt);
			if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
				     len > virtio_vsock_skb_len(skb))) {
				kfree_skb(skb);
				continue;
			}

			pkt->len = len - sizeof(pkt->hdr);
			virtio_transport_deliver_tap_pkt(pkt);
			virtio_transport_recv_pkt(&virtio_transport, pkt);
			virtio_vsock_skb_rx_put(skb);
			virtio_transport_deliver_tap_pkt(skb);
			virtio_transport_recv_pkt(&virtio_transport, skb);
		}
	} while (!virtqueue_enable_cb(vq));

@@ -610,7 +573,7 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	struct virtio_vsock_pkt *pkt;
	struct sk_buff *skb;

	/* Reset all connected sockets when the VQs disappear */
	vsock_for_each_connected_socket(&virtio_transport.transport,
@@ -637,23 +600,16 @@ static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
	virtio_reset_device(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		virtio_transport_free_pkt(pkt);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		virtio_transport_free_pkt(pkt);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->tx_lock);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);
	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

	/* Delete virtqueues and flush outstanding callbacks if any */
	vdev->config->del_vqs(vdev);
@@ -690,8 +646,7 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	skb_queue_head_init(&vsock->send_pkt_queue);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
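
Note how virtio_vsock_rx_fill() above benefits from the header living in
the skb's headroom: header and payload are now physically contiguous, so
a receive buffer is posted with a single scatterlist entry instead of
the old two-entry hdr+buf pair, and rx completion only has to advance
the tail pointer. Condensed for illustration:

	/* Post: one sg entry spans the header and the payload area. */
	sg_init_one(&sg, virtio_vsock_hdr(skb), total_len);
	sg_ptr = &sg;
	ret = virtqueue_add_sgs(vq, &sg_ptr, 0, 1, skb, GFP_KERNEL);

	/* Complete (rx work): the device wrote hdr->len payload bytes
	 * after the header; virtio_vsock_skb_rx_put() performs the
	 * matching skb_put() so skb->len reflects the received payload.
	 */
	virtio_vsock_skb_rx_put(skb);
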
net/vmw_vsock/virtio_transport_common.c +233 −189 (diff collapsed: preview size limit exceeded)

net/vmw_vsock/vsock_loopback.c +15 −36
@@ -16,7 +16,7 @@ struct vsock_loopback {
	struct workqueue_struct *workqueue;

	spinlock_t pkt_list_lock; /* protects pkt_list */
	struct list_head pkt_list;
	struct sk_buff_head pkt_queue;
	struct work_struct pkt_work;
};

@@ -27,13 +27,13 @@ static u32 vsock_loopback_get_local_cid(void)
	return VMADDR_CID_LOCAL;
}

static int vsock_loopback_send_pkt(struct virtio_vsock_pkt *pkt)
static int vsock_loopback_send_pkt(struct sk_buff *skb)
{
	struct vsock_loopback *vsock = &the_vsock_loopback;
	int len = pkt->len;
	int len = skb->len;

	spin_lock_bh(&vsock->pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->pkt_list);
	skb_queue_tail(&vsock->pkt_queue, skb);
	spin_unlock_bh(&vsock->pkt_list_lock);

	queue_work(vsock->workqueue, &vsock->pkt_work);
@@ -44,21 +44,8 @@ static int vsock_loopback_send_pkt(struct virtio_vsock_pkt *pkt)
static int vsock_loopback_cancel_pkt(struct vsock_sock *vsk)
{
	struct vsock_loopback *vsock = &the_vsock_loopback;
	struct virtio_vsock_pkt *pkt, *n;
	LIST_HEAD(freeme);

	spin_lock_bh(&vsock->pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	virtio_transport_purge_skbs(vsk, &vsock->pkt_queue);

	return 0;
}
@@ -121,20 +108,18 @@ static void vsock_loopback_work(struct work_struct *work)
{
	struct vsock_loopback *vsock =
		container_of(work, struct vsock_loopback, pkt_work);
	LIST_HEAD(pkts);
	struct sk_buff_head pkts;
	struct sk_buff *skb;

	skb_queue_head_init(&pkts);

	spin_lock_bh(&vsock->pkt_list_lock);
	list_splice_init(&vsock->pkt_list, &pkts);
	skb_queue_splice_init(&vsock->pkt_queue, &pkts);
	spin_unlock_bh(&vsock->pkt_list_lock);

	while (!list_empty(&pkts)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);

		virtio_transport_deliver_tap_pkt(pkt);
		virtio_transport_recv_pkt(&loopback_transport, pkt);
	while ((skb = __skb_dequeue(&pkts))) {
		virtio_transport_deliver_tap_pkt(skb);
		virtio_transport_recv_pkt(&loopback_transport, skb);
	}
}

@@ -148,7 +133,7 @@ static int __init vsock_loopback_init(void)
		return -ENOMEM;

	spin_lock_init(&vsock->pkt_list_lock);
	INIT_LIST_HEAD(&vsock->pkt_list);
	skb_queue_head_init(&vsock->pkt_queue);
	INIT_WORK(&vsock->pkt_work, vsock_loopback_work);

	ret = vsock_core_register(&loopback_transport.transport,
@@ -166,19 +151,13 @@ static int __init vsock_loopback_init(void)
static void __exit vsock_loopback_exit(void)
{
	struct vsock_loopback *vsock = &the_vsock_loopback;
	struct virtio_vsock_pkt *pkt;

	vsock_core_unregister(&loopback_transport.transport);

	flush_work(&vsock->pkt_work);

	spin_lock_bh(&vsock->pkt_list_lock);
	while (!list_empty(&vsock->pkt_list)) {
		pkt = list_first_entry(&vsock->pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	virtio_vsock_skb_queue_purge(&vsock->pkt_queue);
	spin_unlock_bh(&vsock->pkt_list_lock);

	destroy_workqueue(vsock->workqueue);
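
The loopback worker above keeps the splice-then-drain shape of the old
list code: the whole pending queue moves to a private sk_buff_head in
one operation under the lock, and is then drained with the lockless
__skb_dequeue(). Condensed for illustration:

	struct sk_buff_head pkts;
	struct sk_buff *skb;

	skb_queue_head_init(&pkts);

	/* Grab everything pending in one shot under the producer lock... */
	spin_lock_bh(&vsock->pkt_list_lock);
	skb_queue_splice_init(&vsock->pkt_queue, &pkts);
	spin_unlock_bh(&vsock->pkt_list_lock);

	/* ...then deliver each packet without holding any lock. */
	while ((skb = __skb_dequeue(&pkts))) {
		virtio_transport_deliver_tap_pkt(skb);
		virtio_transport_recv_pkt(&loopback_transport, skb);
	}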