Commit e679198b authored by David S. Miller

Merge branch 'gve-improvements'



Jeroen de Borst says:

====================
gve: minor code and performance improvements

This patchset contains a number of independent minor code and performance
improvements.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents ce8bd03c 1b4d1c9b
drivers/net/ethernet/google/gve/gve.h +10 −8
@@ -30,7 +30,7 @@
 #define GVE_MIN_MSIX 3
 
 /* Numbers of gve tx/rx stats in stats report. */
-#define GVE_TX_STATS_REPORT_NUM	5
+#define GVE_TX_STATS_REPORT_NUM	6
 #define GVE_RX_STATS_REPORT_NUM	2
 
 /* Interval to schedule a stats report update, 20000ms. */
@@ -341,8 +341,8 @@ struct gve_tx_ring {
 	union {
 		/* GQI fields */
 		struct {
-			/* NIC tail pointer */
-			__be32 last_nic_done;
+			/* Spinlock for when cleanup in progress */
+			spinlock_t clean_lock;
 		};
 
 		/* DQO fields. */
@@ -413,7 +413,9 @@ struct gve_tx_ring {
 	u32 q_num ____cacheline_aligned; /* queue idx */
 	u32 stop_queue; /* count of queue stops */
 	u32 wake_queue; /* count of queue wakes */
+	u32 queue_timeout; /* count of queue timeouts */
 	u32 ntfy_id; /* notification block index */
+	u32 last_kick_msec; /* Last time the queue was kicked */
 	dma_addr_t bus; /* dma address of the descr ring */
 	dma_addr_t q_resources_bus; /* dma address of the queue resources */
 	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
@@ -821,15 +823,15 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
 bool gve_tx_poll(struct gve_notify_block *block, int budget);
 int gve_tx_alloc_rings(struct gve_priv *priv);
 void gve_tx_free_rings_gqi(struct gve_priv *priv);
-__be32 gve_tx_load_event_counter(struct gve_priv *priv,
-				 struct gve_tx_ring *tx);
+u32 gve_tx_load_event_counter(struct gve_priv *priv,
+			      struct gve_tx_ring *tx);
+bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
 /* rx handling */
 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
-bool gve_rx_poll(struct gve_notify_block *block, int budget);
+int gve_rx_poll(struct gve_notify_block *block, int budget);
+bool gve_rx_work_pending(struct gve_rx_ring *rx);
 int gve_rx_alloc_rings(struct gve_priv *priv);
 void gve_rx_free_rings_gqi(struct gve_priv *priv);
-bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
-		       netdev_features_t feat);
 /* Reset */
 void gve_schedule_reset(struct gve_priv *priv);
 int gve_reset(struct gve_priv *priv, bool attempt_teardown);
drivers/net/ethernet/google/gve/gve_adminq.h +1 −0
@@ -270,6 +270,7 @@ enum gve_stat_names {
 	TX_LAST_COMPLETION_PROCESSED	= 5,
 	RX_NEXT_EXPECTED_SEQUENCE	= 6,
 	RX_BUFFERS_POSTED		= 7,
+	TX_TIMEOUT_CNT			= 8,
 	// stats from NIC
 	RX_QUEUE_DROP_CNT		= 65,
 	RX_NO_BUFFERS_POSTED		= 66,
drivers/net/ethernet/google/gve/gve_ethtool.c +1 −2
@@ -330,8 +330,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 			data[i++] = tmp_tx_bytes;
 			data[i++] = tx->wake_queue;
 			data[i++] = tx->stop_queue;
-			data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv,
-									  tx));
+			data[i++] = gve_tx_load_event_counter(priv, tx);
 			data[i++] = tx->dma_mapping_error;
 			/* stats from NIC */
 			if (skip_nic_stats) {
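
The byte swap is not dropped here, it moves into the helper: per the prototype change in gve.h above, gve_tx_load_event_counter now returns host byte order. Its gve_tx.c counterpart is not expanded in this view; the following is a sketch of the reworked helper inferred from the declarations shown, so treat the field names and layout as assumptions rather than a verbatim copy of the patch.

/* Sketch (assumed, not shown in this view): read the big-endian event
 * counter the NIC writes into the counter array and convert it to host
 * order once, instead of at every call site.
 */
u32 gve_tx_load_event_counter(struct gve_priv *priv,
			      struct gve_tx_ring *tx)
{
	u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);
	__be32 counter = READ_ONCE(priv->counter_array[counter_index]);

	return be32_to_cpu(counter);
}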
drivers/net/ethernet/google/gve/gve_main.c +69 −17
@@ -24,6 +24,9 @@
 #define GVE_VERSION		"1.0.0"
 #define GVE_VERSION_PREFIX	"GVE-"
 
+// Minimum amount of time between queue kicks in msec (10 seconds)
+#define MIN_TX_TIMEOUT_GAP (1000 * 10)
+
 const char gve_version_str[] = GVE_VERSION;
 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;

@@ -192,34 +195,40 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
 	__be32 __iomem *irq_doorbell;
 	bool reschedule = false;
 	struct gve_priv *priv;
+	int work_done = 0;
 
 	block = container_of(napi, struct gve_notify_block, napi);
 	priv = block->priv;
 
 	if (block->tx)
 		reschedule |= gve_tx_poll(block, budget);
-	if (block->rx)
-		reschedule |= gve_rx_poll(block, budget);
+	if (block->rx) {
+		work_done = gve_rx_poll(block, budget);
+		reschedule |= work_done == budget;
+	}
 
 	if (reschedule)
 		return budget;
 
-	napi_complete(napi);
-	irq_doorbell = gve_irq_doorbell(priv, block);
-	iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
+	/* Complete processing - don't unmask irq if busy polling is enabled */
+	if (likely(napi_complete_done(napi, work_done))) {
+		irq_doorbell = gve_irq_doorbell(priv, block);
+		iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
 
-	/* Double check we have no extra work.
-	 * Ensure unmask synchronizes with checking for work.
-	 */
-	mb();
-	if (block->tx)
-		reschedule |= gve_tx_poll(block, -1);
-	if (block->rx)
-		reschedule |= gve_rx_poll(block, -1);
-	if (reschedule && napi_reschedule(napi))
-		iowrite32be(GVE_IRQ_MASK, irq_doorbell);
+		/* Ensure IRQ ACK is visible before we check pending work.
+		 * If queue had issued updates, it would be truly visible.
+		 */
+		mb();
 
-	return 0;
+		if (block->tx)
+			reschedule |= gve_tx_clean_pending(priv, block->tx);
+		if (block->rx)
+			reschedule |= gve_rx_work_pending(block->rx);
+
+		if (reschedule && napi_reschedule(napi))
+			iowrite32be(GVE_IRQ_MASK, irq_doorbell);
+	}
+	return work_done;
 }
 
 static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
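
The old code always re-armed the interrupt after napi_complete(). With napi_complete_done(), the return value tells the driver whether NAPI actually left polling mode; it returns false while busy polling owns the context, in which case the IRQ must stay masked. A minimal sketch of the general idiom, with hypothetical mydrv_* names that are placeholders for illustration, not gve code:

/* Minimal sketch of the napi_complete_done() idiom; mydrv_* names are
 * hypothetical.
 */
static int mydrv_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_queue *q = container_of(napi, struct mydrv_queue, napi);
	int work_done = mydrv_process_rx(q, budget);

	/* Consumed the whole budget: stay in polling mode. */
	if (work_done == budget)
		return budget;

	/* Only unmask the IRQ if NAPI actually completed; under busy
	 * polling, napi_complete_done() returns false and the core will
	 * invoke this poll callback again.
	 */
	if (likely(napi_complete_done(napi, work_done)))
		mydrv_unmask_irq(q);

	return work_done;
}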
@@ -1115,9 +1124,47 @@ static void gve_turnup(struct gve_priv *priv)

 static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
-	struct gve_priv *priv = netdev_priv(dev);
+	struct gve_notify_block *block;
+	struct gve_tx_ring *tx = NULL;
+	struct gve_priv *priv;
+	u32 last_nic_done;
+	u32 current_time;
+	u32 ntfy_idx;
+
+	netdev_info(dev, "Timeout on tx queue, %d", txqueue);
+	priv = netdev_priv(dev);
+	if (txqueue > priv->tx_cfg.num_queues)
+		goto reset;
+
+	ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
+	if (ntfy_idx > priv->num_ntfy_blks)
+		goto reset;
+
+	block = &priv->ntfy_blocks[ntfy_idx];
+	tx = block->tx;
 
+	current_time = jiffies_to_msecs(jiffies);
+	if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
+		goto reset;
+
+	/* Check to see if there are missed completions, which will allow us to
+	 * kick the queue.
+	 */
+	last_nic_done = gve_tx_load_event_counter(priv, tx);
+	if (last_nic_done - tx->done) {
+		netdev_info(dev, "Kicking queue %d", txqueue);
+		iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
+		napi_schedule(&block->napi);
+		tx->last_kick_msec = current_time;
+		goto out;
+	} // Else reset.
+
+reset:
 	gve_schedule_reset(priv);
+
+out:
+	if (tx)
+		tx->queue_timeout++;
 	priv->tx_timeo_cnt++;
 }
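
Two details in this kick path are easy to miss. First, last_nic_done - tx->done is an unsigned difference between two free-running counters, so it is nonzero exactly when the NIC has posted completions the driver has not yet cleaned, even across u32 wraparound; that condition distinguishes a missed IRQ (kick and resume) from a genuinely stuck queue (reset). Second, as an aside, the two bounds checks use > where the strict choice would be >=, since valid indices run from 0 to n - 1. A standalone sketch of the wraparound-safe pending test, with hypothetical counter values:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: both counters are free-running u32 values, so the
 * unsigned subtraction stays correct even when the NIC counter wraps.
 * The values below are hypothetical.
 */
int main(void)
{
	uint32_t last_nic_done = 0x00000003;	/* NIC event counter, wrapped */
	uint32_t done = 0xfffffffe;		/* completions already cleaned */
	uint32_t pending = last_nic_done - done;	/* 5 (mod 2^32) */

	if (pending)
		printf("kick the queue: %u completions outstanding\n", pending);
	return 0;
}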

@@ -1246,6 +1293,11 @@ void gve_handle_report_stats(struct gve_priv *priv)
 				.value = cpu_to_be64(last_completion),
 				.queue_id = cpu_to_be32(idx),
 			};
+			stats[stats_idx++] = (struct stats) {
+				.stat_name = cpu_to_be32(TX_TIMEOUT_CNT),
+				.value = cpu_to_be64(priv->tx[idx].queue_timeout),
+				.queue_id = cpu_to_be32(idx),
+			};
 		}
 	}
 	/* rx stats */
drivers/net/ethernet/google/gve/gve_rx.c +63 −35
@@ -16,19 +16,23 @@ static void gve_rx_free_buffer(struct device *dev,
 	dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) &
 				      GVE_DATA_SLOT_ADDR_PAGE_MASK);
 
+	page_ref_sub(page_info->page, page_info->pagecnt_bias - 1);
 	gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
 }
 
 static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
 {
-	if (rx->data.raw_addressing) {
-		u32 slots = rx->mask + 1;
-		int i;
+	u32 slots = rx->mask + 1;
+	int i;
 
+	if (rx->data.raw_addressing) {
 		for (i = 0; i < slots; i++)
 			gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
 					   &rx->data.data_ring[i]);
 	} else {
+		for (i = 0; i < slots; i++)
+			page_ref_sub(rx->data.page_info[i].page,
+				     rx->data.page_info[i].pagecnt_bias - 1);
 		gve_unassign_qpl(priv, rx->data.qpl->id);
 		rx->data.qpl = NULL;
 	}
@@ -69,6 +73,9 @@ static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
 	page_info->page_offset = 0;
 	page_info->page_address = page_address(page);
 	*slot_addr = cpu_to_be64(addr);
+	/* The page already has 1 ref */
+	page_ref_add(page, INT_MAX - 1);
+	page_info->pagecnt_bias = INT_MAX;
 }
 
 static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
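
This is the heart of the pagecnt-bias scheme: the driver buys INT_MAX page references up front and tracks in pagecnt_bias how many it still holds, so handing a buffer to the stack becomes a plain decrement instead of the atomic increment that get_page() used to do. The consuming helper is introduced elsewhere in this patch and is not expanded in this view; the following is a sketch of its expected shape, assuming the fields shown in the hunks above:

/* Sketch (assumed): donate one pre-paid reference to the skb path.
 * If the stockpile runs dry, buy INT_MAX more references with a single
 * atomic op; references still held by in-flight skbs remain accounted
 * for in page_count().
 */
static void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
{
	page_info->pagecnt_bias--;

	if (page_info->pagecnt_bias == 0) {
		page_ref_add(page_info->page, INT_MAX);
		page_info->pagecnt_bias = INT_MAX;
	}
}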
@@ -295,21 +302,22 @@ static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *sl

 static bool gve_rx_can_flip_buffers(struct net_device *netdev)
 {
-	return PAGE_SIZE == 4096
+	return PAGE_SIZE >= 4096
 		? netdev->mtu + GVE_RX_PAD + ETH_HLEN <= PAGE_SIZE / 2 : false;
 }
 
-static int gve_rx_can_recycle_buffer(struct page *page)
+static int gve_rx_can_recycle_buffer(struct gve_rx_slot_page_info *page_info)
 {
-	int pagecount = page_count(page);
+	int pagecount = page_count(page_info->page);
 
 	/* This page is not being used by any SKBs - reuse */
-	if (pagecount == 1)
+	if (pagecount == page_info->pagecnt_bias)
 		return 1;
 	/* This page is still being used by an SKB - we can't reuse */
-	else if (pagecount >= 2)
+	else if (pagecount > page_info->pagecnt_bias)
 		return 0;
-	WARN(pagecount < 1, "Pagecount should never be < 1");
+	WARN(pagecount < page_info->pagecnt_bias,
+	     "Pagecount should never be less than the bias.");
 	return -1;
 }
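
With the bias in place, "no skb holds this page" changes from page_count() == 1 to page_count() == pagecnt_bias, because the driver itself deliberately holds bias references at all times. A hypothetical trace of the three outcomes, assuming the gve_dec_pagecnt_bias() sketch above:

/* Hypothetical walkthrough of gve_rx_can_recycle_buffer() under the
 * bias scheme. Right after setup: page_count() == INT_MAX and
 * pagecnt_bias == INT_MAX, so the page is immediately recyclable (1).
 */
void example_trace(struct gve_rx_slot_page_info *pi)
{
	/* RX hands the buffer to an skb: bias drops to INT_MAX - 1 while
	 * page_count() stays INT_MAX, so count > bias => still in use (0).
	 */
	gve_dec_pagecnt_bias(pi);

	/* The stack later frees the skb: put_page() lowers page_count()
	 * to INT_MAX - 1 == bias => free to recycle (1).
	 *
	 * count < bias should be impossible; it triggers the WARN and
	 * returns -1, which callers treat as grounds for a reset.
	 */
}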

@@ -325,11 +333,11 @@ gve_rx_raw_addressing(struct device *dev, struct net_device *netdev,
 	if (!skb)
 		return NULL;
 
-	/* Optimistically stop the kernel from freeing the page by increasing
-	 * the page bias. We will check the refcount in refill to determine if
-	 * we need to alloc a new page.
+	/* Optimistically stop the kernel from freeing the page.
+	 * We will check again in refill to determine if we need to alloc a
+	 * new page.
 	 */
-	get_page(page_info->page);
+	gve_dec_pagecnt_bias(page_info);
 
 	return skb;
 }
@@ -352,7 +360,7 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
 		/* No point in recycling if we didn't get the skb */
 		if (skb) {
 			/* Make sure that the page isn't freed. */
-			get_page(page_info->page);
+			gve_dec_pagecnt_bias(page_info);
 			gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
 		}
 	} else {
@@ -376,8 +384,18 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 	union gve_rx_data_slot *data_slot;
 	struct sk_buff *skb = NULL;
 	dma_addr_t page_bus;
+	void *va;
 	u16 len;
 
+	/* Prefetch two packet pages ahead, we will need it soon. */
+	page_info = &rx->data.page_info[(idx + 2) & rx->mask];
+	va = page_info->page_address + GVE_RX_PAD +
+		page_info->page_offset;
+
+	prefetch(page_info->page); /* Kernel page struct. */
+	prefetch(va);              /* Packet header. */
+	prefetch(va + 64);         /* Next cacheline too. */
+
 	/* drop this packet */
 	if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR)) {
 		u64_stats_update_begin(&rx->statss);
@@ -408,7 +426,7 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 		int recycle = 0;
 
 		if (can_flip) {
-			recycle = gve_rx_can_recycle_buffer(page_info->page);
+			recycle = gve_rx_can_recycle_buffer(page_info);
 			if (recycle < 0) {
 				if (!rx->data.raw_addressing)
 					gve_schedule_reset(priv);
@@ -456,7 +474,7 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 	return true;
 }
 
-static bool gve_rx_work_pending(struct gve_rx_ring *rx)
+bool gve_rx_work_pending(struct gve_rx_ring *rx)
 {
 	struct gve_rx_desc *desc;
 	__be16 flags_seq;
@@ -499,7 +517,7 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
 			 * owns half the page it is impossible to tell which half. Either
 			 * the whole page is free or it needs to be replaced.
 			 */
-			int recycle = gve_rx_can_recycle_buffer(page_info->page);
+			int recycle = gve_rx_can_recycle_buffer(page_info);
 
 			if (recycle < 0) {
 				if (!rx->data.raw_addressing)
@@ -514,17 +532,22 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)

 				gve_rx_free_buffer(dev, page_info, data_slot);
 				page_info->page = NULL;
-				if (gve_rx_alloc_buffer(priv, dev, page_info, data_slot))
+				if (gve_rx_alloc_buffer(priv, dev, page_info,
+							data_slot)) {
+					u64_stats_update_begin(&rx->statss);
+					rx->rx_buf_alloc_fail++;
+					u64_stats_update_end(&rx->statss);
 					break;
+				}
 			}
 		}
 		fill_cnt++;
 	}
 	rx->fill_cnt = fill_cnt;
 	return true;
 }
 
-bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
-		       netdev_features_t feat)
+static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
+			     netdev_features_t feat)
 {
 	struct gve_priv *priv = rx->gve;
@@ -546,6 +569,10 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
			   "[%d] seqno=%d rx->desc.seqno=%d\n",
			   rx->q_num, GVE_SEQNO(desc->flags_seq),
			   rx->desc.seqno);

		/* prefetch two descriptors ahead */
		prefetch(rx->desc.desc_ring + ((cnt + 2) & rx->mask));

		dropped = !gve_rx(rx, desc, feat, idx);
		if (!dropped) {
			bytes += be16_to_cpu(desc->len) - GVE_RX_PAD;
@@ -559,13 +586,15 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
 	}
 
 	if (!work_done && rx->fill_cnt - cnt > rx->db_threshold)
-		return false;
+		return 0;
 
-	u64_stats_update_begin(&rx->statss);
-	rx->rpackets += packets;
-	rx->rbytes += bytes;
-	u64_stats_update_end(&rx->statss);
-	rx->cnt = cnt;
+	if (work_done) {
+		u64_stats_update_begin(&rx->statss);
+		rx->rpackets += packets;
+		rx->rbytes += bytes;
+		u64_stats_update_end(&rx->statss);
+		rx->cnt = cnt;
+	}
 
 	/* restock ring slots */
 	if (!rx->data.raw_addressing) {
@@ -576,26 +605,26 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
 		 * falls below a threshold.
 		 */
 		if (!gve_rx_refill_buffers(priv, rx))
-			return false;
+			return 0;
 
 		/* If we were not able to completely refill buffers, we'll want
 		 * to schedule this queue for work again to refill buffers.
 		 */
 		if (rx->fill_cnt - cnt <= rx->db_threshold) {
 			gve_rx_write_doorbell(priv, rx);
-			return true;
+			return budget;
 		}
 	}
 
 	gve_rx_write_doorbell(priv, rx);
-	return gve_rx_work_pending(rx);
+	return work_done;
 }
 
-bool gve_rx_poll(struct gve_notify_block *block, int budget)
+int gve_rx_poll(struct gve_notify_block *block, int budget)
 {
 	struct gve_rx_ring *rx = block->rx;
 	netdev_features_t feat;
-	bool repoll = false;
+	int work_done = 0;
 
 	feat = block->napi.dev->features;

@@ -604,8 +633,7 @@ bool gve_rx_poll(struct gve_notify_block *block, int budget)
 		budget = INT_MAX;
 
 	if (budget > 0)
-		repoll |= gve_clean_rx_done(rx, budget, feat);
-	else
-		repoll |= gve_rx_work_pending(rx);
-	return repoll;
+		work_done = gve_clean_rx_done(rx, budget, feat);
+
+	return work_done;
 }