Commit 1e0083bd authored by Arnd Bergmann, committed by David S. Miller

gve: DQO: avoid unused variable warnings

The use of dma_unmap_addr()/dma_unmap_len() in the driver causes
multiple warnings when these macros are defined as empty, e.g.
in an ARCH=i386 allmodconfig build:

drivers/net/ethernet/google/gve/gve_tx_dqo.c: In function 'gve_tx_add_skb_no_copy_dqo':
drivers/net/ethernet/google/gve/gve_tx_dqo.c:494:40: error: unused variable 'buf' [-Werror=unused-variable]
  494 |                 struct gve_tx_dma_buf *buf =

This is not how the NEED_DMA_MAP_STATE macros are meant to be used:
they rely on the saved state being referenced directly through the
enclosing structure, never through local variables or a temporary
structure like gve_tx_dma_buf.
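
For reference, a condensed copy of the two variants of these macros,
paraphrased from include/linux/dma-mapping.h: with
CONFIG_NEED_DMA_MAP_STATE=y they declare real members and dereference
them through the enclosing structure; otherwise they expand to nothing,
which is what leaves a helper variable like 'buf' unused:

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif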

Remove the gve_tx_dma_buf definition and open-code the contents
in all places to avoid the warning. This causes some rather long
lines but otherwise ends up making the driver slightly smaller.

Fixes: a57e5de4 ("gve: DQO: Add TX path")
Link: https://lore.kernel.org/netdev/20210723231957.1113800-1-bcf@google.com/
Link: https://lore.kernel.org/netdev/20210721151100.2042139-1-arnd@kernel.org/


Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent af3826db
drivers/net/ethernet/google/gve/gve.h  +6 −7
@@ -224,11 +224,6 @@ struct gve_tx_iovec {
 	u32 iov_padding; /* padding associated with this segment */
 };
 
-struct gve_tx_dma_buf {
-	DEFINE_DMA_UNMAP_ADDR(dma);
-	DEFINE_DMA_UNMAP_LEN(len);
-};
-
 /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
  * ring entry but only used for a pkt_desc not a seg_desc
  */
@@ -236,7 +231,10 @@ struct gve_tx_buffer_state {
 	struct sk_buff *skb; /* skb for this pkt */
 	union {
 		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
-		struct gve_tx_dma_buf buf;
+		struct {
+			DEFINE_DMA_UNMAP_ADDR(dma);
+			DEFINE_DMA_UNMAP_LEN(len);
+		};
 	};
 };
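
The anonymous struct works because C11 anonymous union/struct members
are accessed as if declared directly in the enclosing type, so
dma_unmap_addr_set(info, dma, addr) can expand to info->dma = addr with
no intermediate name. A standalone sketch of the pattern, illustrative
only and not part of the patch:

struct buffer_state_sketch {
	union {
		int iov[4];	/* stand-in for the iovec array */
		struct {	/* anonymous: members reachable as bs->dma, bs->len */
			dma_addr_t dma;
			u32 len;
		};
	};
};

static void sketch_set(struct buffer_state_sketch *bs, dma_addr_t addr)
{
	bs->dma = addr;	/* reaches through the union and anonymous struct */
	bs->len = 64;
}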

@@ -280,7 +278,8 @@ struct gve_tx_pending_packet_dqo {
 	 * All others correspond to `skb`'s frags and should be unmapped with
 	 * `dma_unmap_page`.
 	 */
-	struct gve_tx_dma_buf bufs[MAX_SKB_FRAGS + 1];
+	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
+	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
 	u16 num_bufs;
 
 	/* Linked list index to next element in the list, or -1 if none */
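
The array form works because the macro argument is pasted verbatim into
the member declaration and after the '->', so under the
CONFIG_NEED_DMA_MAP_STATE=y expansion shown earlier this declares a real
array and the accessors take an indexed member designator:

	/* DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]) expands to: */
	dma_addr_t dma[MAX_SKB_FRAGS + 1];

	/* dma_unmap_addr_set(pkt, dma[i], addr) expands to: */
	((pkt)->dma[i]) = (addr);

With the option disabled, both arrays and every access compile away, and
no local pointer is left behind to warn about.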
drivers/net/ethernet/google/gve/gve_tx.c  +10 −13
@@ -303,15 +303,15 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
 static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
 {
 	if (info->skb) {
-		dma_unmap_single(dev, dma_unmap_addr(&info->buf, dma),
-				 dma_unmap_len(&info->buf, len),
+		dma_unmap_single(dev, dma_unmap_addr(info, dma),
+				 dma_unmap_len(info, len),
 				 DMA_TO_DEVICE);
-		dma_unmap_len_set(&info->buf, len, 0);
+		dma_unmap_len_set(info, len, 0);
 	} else {
-		dma_unmap_page(dev, dma_unmap_addr(&info->buf, dma),
-			       dma_unmap_len(&info->buf, len),
+		dma_unmap_page(dev, dma_unmap_addr(info, dma),
+			       dma_unmap_len(info, len),
 			       DMA_TO_DEVICE);
-		dma_unmap_len_set(&info->buf, len, 0);
+		dma_unmap_len_set(info, len, 0);
 	}
 }

@@ -491,7 +491,6 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
 	struct gve_tx_buffer_state *info;
 	bool is_gso = skb_is_gso(skb);
 	u32 idx = tx->req & tx->mask;
-	struct gve_tx_dma_buf *buf;
 	u64 addr;
 	u32 len;
 	int i;
@@ -515,9 +514,8 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
 		tx->dma_mapping_error++;
 		goto drop;
 	}
-	buf = &info->buf;
-	dma_unmap_len_set(buf, len, len);
-	dma_unmap_addr_set(buf, dma, addr);
+	dma_unmap_len_set(info, len, len);
+	dma_unmap_addr_set(info, dma, addr);
 
 	payload_nfrags = shinfo->nr_frags;
 	if (hlen < len) {
@@ -549,10 +547,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
 			tx->dma_mapping_error++;
 			goto unmap_drop;
 		}
-		buf = &tx->info[idx].buf;
 		tx->info[idx].skb = NULL;
-		dma_unmap_len_set(buf, len, len);
-		dma_unmap_addr_set(buf, dma, addr);
+		dma_unmap_len_set(&tx->info[idx], len, len);
+		dma_unmap_addr_set(&tx->info[idx], dma, addr);
 
 		gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
 	}
drivers/net/ethernet/google/gve/gve_tx_dqo.c  +38 −46
@@ -85,17 +85,15 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
 		int j;
 
 		for (j = 0; j < cur_state->num_bufs; j++) {
-			struct gve_tx_dma_buf *buf = &cur_state->bufs[j];
-
 			if (j == 0) {
 				dma_unmap_single(tx->dev,
-						 dma_unmap_addr(buf, dma),
-						 dma_unmap_len(buf, len),
-						 DMA_TO_DEVICE);
+					dma_unmap_addr(cur_state, dma[j]),
+					dma_unmap_len(cur_state, len[j]),
+					DMA_TO_DEVICE);
 			} else {
 				dma_unmap_page(tx->dev,
-					       dma_unmap_addr(buf, dma),
-					       dma_unmap_len(buf, len),
-					       DMA_TO_DEVICE);
+					dma_unmap_addr(cur_state, dma[j]),
+					dma_unmap_len(cur_state, len[j]),
+					DMA_TO_DEVICE);
 			}
 		}
@@ -457,15 +455,15 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
 	const bool is_gso = skb_is_gso(skb);
 	u32 desc_idx = tx->dqo_tx.tail;
 
-	struct gve_tx_pending_packet_dqo *pending_packet;
+	struct gve_tx_pending_packet_dqo *pkt;
 	struct gve_tx_metadata_dqo metadata;
 	s16 completion_tag;
 	int i;
 
-	pending_packet = gve_alloc_pending_packet(tx);
-	pending_packet->skb = skb;
-	pending_packet->num_bufs = 0;
-	completion_tag = pending_packet - tx->dqo.pending_packets;
+	pkt = gve_alloc_pending_packet(tx);
+	pkt->skb = skb;
+	pkt->num_bufs = 0;
+	completion_tag = pkt - tx->dqo.pending_packets;
 
 	gve_extract_tx_metadata_dqo(skb, &metadata);
 	if (is_gso) {
@@ -493,8 +491,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,

 	/* Map the linear portion of skb */
 	{
-		struct gve_tx_dma_buf *buf =
-			&pending_packet->bufs[pending_packet->num_bufs];
 		u32 len = skb_headlen(skb);
 		dma_addr_t addr;

@@ -502,9 +498,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
 		if (unlikely(dma_mapping_error(tx->dev, addr)))
 			goto err;
 
-		dma_unmap_len_set(buf, len, len);
-		dma_unmap_addr_set(buf, dma, addr);
-		++pending_packet->num_bufs;
+		dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
+		dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+		++pkt->num_bufs;
 
 		gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
 					 completion_tag,
@@ -512,8 +508,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
 	}
 
 	for (i = 0; i < shinfo->nr_frags; i++) {
-		struct gve_tx_dma_buf *buf =
-			&pending_packet->bufs[pending_packet->num_bufs];
 		const skb_frag_t *frag = &shinfo->frags[i];
 		bool is_eop = i == (shinfo->nr_frags - 1);
 		u32 len = skb_frag_size(frag);
@@ -523,9 +517,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
 		if (unlikely(dma_mapping_error(tx->dev, addr)))
 			goto err;
 
-		dma_unmap_len_set(buf, len, len);
-		dma_unmap_addr_set(buf, dma, addr);
-		++pending_packet->num_bufs;
+		dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
+		dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+		++pkt->num_bufs;
 
 		gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
 					 completion_tag, is_eop, is_gso);
@@ -552,22 +546,23 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
 	return 0;
 
 err:
-	for (i = 0; i < pending_packet->num_bufs; i++) {
-		struct gve_tx_dma_buf *buf = &pending_packet->bufs[i];
-
+	for (i = 0; i < pkt->num_bufs; i++) {
 		if (i == 0) {
-			dma_unmap_single(tx->dev, dma_unmap_addr(buf, dma),
-					 dma_unmap_len(buf, len),
+			dma_unmap_single(tx->dev,
+					 dma_unmap_addr(pkt, dma[i]),
+					 dma_unmap_len(pkt, len[i]),
 					 DMA_TO_DEVICE);
 		} else {
-			dma_unmap_page(tx->dev, dma_unmap_addr(buf, dma),
-				       dma_unmap_len(buf, len), DMA_TO_DEVICE);
+			dma_unmap_page(tx->dev,
+				       dma_unmap_addr(pkt, dma[i]),
+				       dma_unmap_len(pkt, len[i]),
+				       DMA_TO_DEVICE);
 		}
 	}
 
-	pending_packet->skb = NULL;
-	pending_packet->num_bufs = 0;
-	gve_free_pending_packet(tx, pending_packet);
+	pkt->skb = NULL;
+	pkt->num_bufs = 0;
+	gve_free_pending_packet(tx, pkt);
 
 	return -1;
 }
@@ -725,12 +720,12 @@ static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,

 static void remove_from_list(struct gve_tx_ring *tx,
 			     struct gve_index_list *list,
-			     struct gve_tx_pending_packet_dqo *pending_packet)
+			     struct gve_tx_pending_packet_dqo *pkt)
 {
 	s16 prev_index, next_index;
 
-	prev_index = pending_packet->prev;
-	next_index = pending_packet->next;
+	prev_index = pkt->prev;
+	next_index = pkt->next;
 
 	if (prev_index == -1) {
 		/* Node is head */
@@ -747,21 +742,18 @@ static void remove_from_list(struct gve_tx_ring *tx,
 }
 
 static void gve_unmap_packet(struct device *dev,
-			     struct gve_tx_pending_packet_dqo *pending_packet)
+			     struct gve_tx_pending_packet_dqo *pkt)
 {
-	struct gve_tx_dma_buf *buf;
 	int i;
 
 	/* SKB linear portion is guaranteed to be mapped */
-	buf = &pending_packet->bufs[0];
-	dma_unmap_single(dev, dma_unmap_addr(buf, dma),
-			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
-	for (i = 1; i < pending_packet->num_bufs; i++) {
-		buf = &pending_packet->bufs[i];
-		dma_unmap_page(dev, dma_unmap_addr(buf, dma),
-			       dma_unmap_len(buf, len), DMA_TO_DEVICE);
-	}
-	pending_packet->num_bufs = 0;
+	dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
+			 dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
+	for (i = 1; i < pkt->num_bufs; i++) {
+		dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
+			       dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
+	}
+	pkt->num_bufs = 0;
 }
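
As the comment retained in gve.h notes, buffer 0 is the skb's linear
area and all later buffers are frags, which is why the unmap side keeps
dma_unmap_single() for index 0 and dma_unmap_page() for the rest. A
condensed sketch of the matching map side, assuming the usual
dma_map_single()/skb_frag_dma_map() calls the driver makes around these
hunks:

	/* linear portion: index 0, undone later by dma_unmap_single() */
	addr = dma_map_single(tx->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(pkt, dma[0], addr);

	/* each frag: index i >= 1, undone later by dma_unmap_page() */
	addr = skb_frag_dma_map(tx->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE);
	dma_unmap_addr_set(pkt, dma[i], addr);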

/* Completion types and expected behavior: