Commit 61d72c7e authored by Tao Liu, committed by David S. Miller

gve: Do lazy cleanup in TX path



When the TX queue is full, attempt to process enough TX completions
to avoid stalling the queue.
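
In outline, the fallback added to gve_maybe_stop_tx() (shown in full in the gve_tx.c hunk below) is roughly the following; identifiers match the patch, but the control flow is condensed for illustration:

        /* Condensed sketch of the lazy-cleanup fallback, not the literal patch */
        spin_lock(&tx->clean_lock);
        nic_done = gve_tx_load_event_counter(priv, tx);
        to_do = nic_done - tx->done;    /* completions the NIC has reported */

        /* Only clean if doing so could free enough room for this skb */
        if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
                to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT);    /* bound the work */
                gve_clean_tx_done(priv, tx, to_do, false);
                if (gve_can_tx(tx, bytes_required))
                        ret = 0;        /* space reclaimed, xmit proceeds */
        }
        if (ret)
                netif_tx_stop_queue(tx->netdev_txq);    /* still full, stop queue */
        spin_unlock(&tx->clean_lock);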

Fixes: f5cedc84 ("gve: Add transmit and receive support")
Signed-off-by: Tao Liu <xliutaox@google.com>
Signed-off-by: Catherine Sullivan <csully@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 58401b2a
drivers/net/ethernet/google/gve/gve.h  +5 −4
@@ -341,8 +341,8 @@ struct gve_tx_ring {
 	union {
 		/* GQI fields */
 		struct {
-			/* NIC tail pointer */
-			__be32 last_nic_done;
+			/* Spinlock for when cleanup in progress */
+			spinlock_t clean_lock;
 		};
 
 		/* DQO fields. */
@@ -821,8 +821,9 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
 bool gve_tx_poll(struct gve_notify_block *block, int budget);
 int gve_tx_alloc_rings(struct gve_priv *priv);
 void gve_tx_free_rings_gqi(struct gve_priv *priv);
-__be32 gve_tx_load_event_counter(struct gve_priv *priv,
-				 struct gve_tx_ring *tx);
+u32 gve_tx_load_event_counter(struct gve_priv *priv,
+			      struct gve_tx_ring *tx);
+bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
 /* rx handling */
 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
 int gve_rx_poll(struct gve_notify_block *block, int budget);
drivers/net/ethernet/google/gve/gve_ethtool.c  +1 −2
@@ -330,8 +330,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 			data[i++] = tmp_tx_bytes;
 			data[i++] = tx->wake_queue;
 			data[i++] = tx->stop_queue;
-			data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv,
-									  tx));
+			data[i++] = gve_tx_load_event_counter(priv, tx);
 			data[i++] = tx->dma_mapping_error;
 			/* stats from NIC */
 			if (skip_nic_stats) {
drivers/net/ethernet/google/gve/gve_main.c  +3 −3
@@ -212,13 +212,13 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
 		irq_doorbell = gve_irq_doorbell(priv, block);
 		iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
 
-		/* Double check we have no extra work.
-		 * Ensure unmask synchronizes with checking for work.
+		/* Ensure IRQ ACK is visible before we check pending work.
+		 * If queue had issued updates, it would be truly visible.
 		 */
 		mb();
 
 		if (block->tx)
-			reschedule |= gve_tx_poll(block, -1);
+			reschedule |= gve_tx_clean_pending(priv, block->tx);
 		if (block->rx)
 			reschedule |= gve_rx_work_pending(block->rx);

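Note that the re-check above previously called gve_tx_poll(block, -1): as the gve_tx.c hunk below shows, a negative budget fell through the old "if (budget > 0)" branch, so the call only compared counters without cleaning. The new gve_tx_clean_pending() helper (defined at the end of this diff) makes that peek-without-cleaning semantic explicit; under the old code it was effectively:

        /* What gve_tx_poll(block, -1) amounted to before this patch (sketch):
         * with a negative budget the cleanup branch never ran, leaving only
         * the "does the NIC report unprocessed completions?" check.
         */
        nic_done = be32_to_cpu(gve_tx_load_event_counter(priv, tx));
        reschedule |= (nic_done != tx->done);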
drivers/net/ethernet/google/gve/gve_tx.c  +53 −41
@@ -144,7 +144,7 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx)

 	gve_tx_remove_from_block(priv, idx);
 	slots = tx->mask + 1;
-	gve_clean_tx_done(priv, tx, tx->req, false);
+	gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
 	netdev_tx_reset_queue(tx->netdev_txq);
 
 	dma_free_coherent(hdev, sizeof(*tx->q_resources),
@@ -176,6 +176,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)

 	/* Make sure everything is zeroed to start */
 	memset(tx, 0, sizeof(*tx));
+	spin_lock_init(&tx->clean_lock);
 	tx->q_num = idx;
 
 	tx->mask = slots - 1;
@@ -328,10 +329,16 @@ static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
 	return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
 }
 
+static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED);
+
 /* Stops the queue if the skb cannot be transmitted. */
-static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
+static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
+			     struct sk_buff *skb)
 {
 	int bytes_required = 0;
+	u32 nic_done;
+	u32 to_do;
+	int ret;
 
 	if (!tx->raw_addressing)
 		bytes_required = gve_skb_fifo_bytes_required(tx, skb);
@@ -339,29 +346,28 @@ static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
 	if (likely(gve_can_tx(tx, bytes_required)))
 		return 0;
 
-	/* No space, so stop the queue */
-	tx->stop_queue++;
-	netif_tx_stop_queue(tx->netdev_txq);
-	smp_mb();	/* sync with restarting queue in gve_clean_tx_done() */
-
-	/* Now check for resources again, in case gve_clean_tx_done() freed
-	 * resources after we checked and we stopped the queue after
-	 * gve_clean_tx_done() checked.
-	 *
-	 * gve_maybe_stop_tx()			gve_clean_tx_done()
-	 *   nsegs/can_alloc test failed
-	 *					  gve_tx_free_fifo()
-	 *					  if (tx queue stopped)
-	 *					    netif_tx_queue_wake()
-	 *   netif_tx_stop_queue()
-	 *   Need to check again for space here!
-	 */
-	if (likely(!gve_can_tx(tx, bytes_required)))
-		return -EBUSY;
-
-	netif_tx_start_queue(tx->netdev_txq);
-	tx->wake_queue++;
-	return 0;
+	ret = -EBUSY;
+	spin_lock(&tx->clean_lock);
+	nic_done = gve_tx_load_event_counter(priv, tx);
+	to_do = nic_done - tx->done;
+
+	/* Only try to clean if there is hope for TX */
+	if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
+		if (to_do > 0) {
+			to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT);
+			gve_clean_tx_done(priv, tx, to_do, false);
+		}
+		if (likely(gve_can_tx(tx, bytes_required)))
+			ret = 0;
+	}
+	if (ret) {
+		/* No space, so stop the queue */
+		tx->stop_queue++;
+		netif_tx_stop_queue(tx->netdev_txq);
+	}
+	spin_unlock(&tx->clean_lock);
+
+	return ret;
 }
 
 static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
@@ -576,7 +582,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
 	WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
 	     "skb queue index out of range");
 	tx = &priv->tx[skb_get_queue_mapping(skb)];
-	if (unlikely(gve_maybe_stop_tx(tx, skb))) {
+	if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) {
 		/* We need to ring the txq doorbell -- we have stopped the Tx
 		 * queue for want of resources, but prior calls to gve_tx()
 		 * may have added descriptors without ringing the doorbell.
@@ -672,19 +678,19 @@ static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
 	return pkts;
 }
 
-__be32 gve_tx_load_event_counter(struct gve_priv *priv,
-				 struct gve_tx_ring *tx)
+u32 gve_tx_load_event_counter(struct gve_priv *priv,
+			      struct gve_tx_ring *tx)
 {
-	u32 counter_index = be32_to_cpu((tx->q_resources->counter_index));
+	u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);
+	__be32 counter = READ_ONCE(priv->counter_array[counter_index]);
 
-	return READ_ONCE(priv->counter_array[counter_index]);
+	return be32_to_cpu(counter);
 }
 
 bool gve_tx_poll(struct gve_notify_block *block, int budget)
 {
 	struct gve_priv *priv = block->priv;
 	struct gve_tx_ring *tx = block->tx;
-	bool repoll = false;
 	u32 nic_done;
 	u32 to_do;
 
@@ -692,17 +698,23 @@ bool gve_tx_poll(struct gve_notify_block *block, int budget)
 	if (budget == 0)
 		budget = INT_MAX;
 
-	/* Find out how much work there is to be done */
-	tx->last_nic_done = gve_tx_load_event_counter(priv, tx);
-	nic_done = be32_to_cpu(tx->last_nic_done);
-	if (budget > 0) {
-		/* Do as much work as we have that the budget will
-		 * allow
-		 */
-		to_do = min_t(u32, (nic_done - tx->done), budget);
-		gve_clean_tx_done(priv, tx, to_do, true);
-	}
+	/* In TX path, it may try to clean completed pkts in order to xmit,
+	 * to avoid cleaning conflict, use spin_lock(), it yields better
+	 * concurrency between xmit/clean than netif's lock.
+	 */
+	spin_lock(&tx->clean_lock);
+	/* Find out how much work there is to be done */
+	nic_done = gve_tx_load_event_counter(priv, tx);
+	to_do = min_t(u32, (nic_done - tx->done), budget);
+	gve_clean_tx_done(priv, tx, to_do, true);
+	spin_unlock(&tx->clean_lock);
 	/* If we still have work we want to repoll */
-	repoll |= (nic_done != tx->done);
-	return repoll;
+	return nic_done != tx->done;
 }
+
+bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx)
+{
+	u32 nic_done = gve_tx_load_event_counter(priv, tx);
+
+	return nic_done != tx->done;
+}
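
Worth noting on the locking design: after this patch both the xmit path (gve_maybe_stop_tx()) and the NAPI handler (gve_tx_poll()) can invoke gve_clean_tx_done(), so only the cleaning step is serialized, via tx->clean_lock, rather than taking the generic netif TX lock around the whole path; roughly:

        /* Rough picture of the resulting concurrency (editor's sketch, not
         * from the patch): cleanup is mutually exclusive, everything else
         * in the xmit and NAPI paths still runs in parallel.
         *
         *   xmit path (ring full)           NAPI poll
         *   ---------------------           ---------
         *   spin_lock(&tx->clean_lock);
         *   gve_clean_tx_done(priv, tx,
         *                     to_do, false);
         *   spin_unlock(&tx->clean_lock);
         *                                   spin_lock(&tx->clean_lock);
         *                                   gve_clean_tx_done(priv, tx,
         *                                                     to_do, true);
         *                                   spin_unlock(&tx->clean_lock);
         */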