Commit 917fd7d6 authored by Paolo Abeni
Browse files

Merge branch 'xen-netback-fix-issue-introduced-recently'

Juergen Gross says:

====================
xen/netback: fix issue introduced recently

The fix for XSA-423 introduced a bug which resulted in loss of network
connection in some configurations.

The first patch fixes the issue, while the second one removes a test
that is no longer needed.
====================

Link: https://lore.kernel.org/r/20230327083646.18690-1-jgross@suse.com


Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents f22c993f 8fb8ebf9
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -166,7 +166,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];

	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
	struct gnttab_copy tx_copy_ops[2 * MAX_PENDING_REQS];
	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
+23 −6
Original line number Diff line number Diff line
@@ -334,6 +334,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
struct xenvif_tx_cb {
	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
	u8 copy_count;
	u32 split_mask;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
@@ -361,6 +362,8 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
	struct sk_buff *skb =
		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
			  GFP_ATOMIC | __GFP_NOWARN);

	BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
	if (unlikely(skb == NULL))
		return NULL;

@@ -396,11 +399,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
	nr_slots = shinfo->nr_frags + 1;

	copy_count(skb) = 0;
	XENVIF_TX_CB(skb)->split_mask = 0;

	/* Create copy ops for exactly data_len bytes into the skb head. */
	__skb_put(skb, data_len);
	while (data_len > 0) {
		int amount = data_len > txp->size ? txp->size : data_len;
		bool split = false;

		cop->source.u.ref = txp->gref;
		cop->source.domid = queue->vif->domid;
@@ -413,6 +418,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
				               - data_len);

		/* Don't cross local page boundary! */
		if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
			amount = XEN_PAGE_SIZE - cop->dest.offset;
			XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
			split = true;
		}

		cop->len = amount;
		cop->flags = GNTCOPY_source_gref;

@@ -420,6 +432,7 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
		pending_idx = queue->pending_ring[index];
		callback_param(queue, pending_idx).ctx = NULL;
		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
		if (!split)
			copy_count(skb)++;

		cop++;
@@ -441,7 +454,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
			nr_slots--;
		} else {
			/* The copy op partially covered the tx_request.
			 * The remainder will be mapped.
			 * The remainder will be mapped or copied in the next
			 * iteration.
			 */
			txp->offset += amount;
			txp->size -= amount;
@@ -539,6 +553,13 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
		pending_idx = copy_pending_idx(skb, i);

		newerr = (*gopp_copy)->status;

		/* Split copies need to be handled together. */
		if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
			(*gopp_copy)++;
			if (!newerr)
				newerr = (*gopp_copy)->status;
		}
		if (likely(!newerr)) {
			/* The first frag might still have this slot mapped */
			if (i < copy_count(skb) - 1 || !sharedslot)
@@ -1061,10 +1082,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
		__skb_queue_tail(&queue->tx_queue, skb);

		queue->tx.req_cons = idx;

		if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
			break;
	}

	return;