Commit fc720399 authored by Jakub Kicinski
Browse files

Merge branch 'bnxt_en-update-for-net-next'

Michael Chan says:

====================
bnxt_en: Update for net-next

This patchset contains 2 features:

- The page pool implementation for the normal RX path (non-XDP) for
paged buffers in the aggregation ring.

- Saving of the ring error counters across reset.
====================

Link: https://lore.kernel.org/r/20230817231911.165035-1-michael.chan@broadcom.com


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents c6cfc6cd 8becd196
Loading
Loading
Loading
Loading
+62 −82
Original line number Diff line number Diff line
@@ -761,7 +761,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 unsigned int *offset,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
@@ -774,12 +773,7 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE,
				      bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	*mapping = page_pool_get_dma_addr(page) + *offset;
	return page;
}

@@ -877,49 +871,16 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (BNXT_RX_PAGE_MODE(bp)) {
	page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);

	if (!page)
		return -ENOMEM;

	} else {
		if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
			page = rxr->rx_page;
			if (!page) {
				page = alloc_page(gfp);
				if (!page)
					return -ENOMEM;
				rxr->rx_page = page;
				rxr->rx_page_offset = 0;
			}
			offset = rxr->rx_page_offset;
			rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
			if (rxr->rx_page_offset == PAGE_SIZE)
				rxr->rx_page = NULL;
			else
				get_page(page);
		} else {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
		}

		mapping = dma_map_page_attrs(&pdev->dev, page, offset,
					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
					     DMA_ATTR_WEAK_ORDERING);
		if (dma_mapping_error(&pdev->dev, mapping)) {
			__free_page(page);
			return -EIO;
		}
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

@@ -1031,8 +992,8 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
				bp->rx_dir);
	skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
	if (!skb) {
		page_pool_recycle_direct(rxr->page_pool, page);
@@ -1065,8 +1026,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
				bp->rx_dir);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1182,9 +1143,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
			return 0;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     bp->rx_dir,
				     DMA_ATTR_WEAK_ORDERING);
		dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
					bp->rx_dir);

		total_frag_len += frag_len;
		prod = NEXT_RX_AGG(prod);
@@ -1204,6 +1164,7 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
	total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
					     agg_bufs, tpa, NULL);
	if (!total_frag_len) {
		skb_mark_for_recycle(skb);
		dev_kfree_skb(skb);
		return NULL;
	}
@@ -1794,6 +1755,7 @@ static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
		return;
	}
	skb_record_rx_queue(skb, bnapi->index);
	skb_mark_for_recycle(skb);
	napi_gro_receive(&bnapi->napi, skb);
}

@@ -2978,10 +2940,6 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)

		rx_buf->data = NULL;
		if (BNXT_RX_PAGE_MODE(bp)) {
			mapping -= bp->rx_dma_offset;
			dma_unmap_page_attrs(&pdev->dev, mapping,
					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
					     DMA_ATTR_WEAK_ORDERING);
			page_pool_recycle_direct(rxr->page_pool, data);
		} else {
			dma_unmap_single_attrs(&pdev->dev, mapping,
@@ -3002,30 +2960,13 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
		if (!page)
			continue;

		if (BNXT_RX_PAGE_MODE(bp)) {
			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
					     DMA_ATTR_WEAK_ORDERING);
		rx_agg_buf->page = NULL;
		__clear_bit(i, rxr->rx_agg_bmap);

		page_pool_recycle_direct(rxr->page_pool, page);
		} else {
			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
					     DMA_ATTR_WEAK_ORDERING);
			rx_agg_buf->page = NULL;
			__clear_bit(i, rxr->rx_agg_bmap);

			__free_page(page);
		}
	}

skip_rx_agg_free:
	if (rxr->rx_page) {
		__free_page(rxr->rx_page);
		rxr->rx_page = NULL;
	}
	map = rxr->rx_tpa_idx_map;
	if (map)
		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
@@ -3244,11 +3185,15 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
{
	struct page_pool_params pp = { 0 };

	pp.pool_size = bp->rx_ring_size;
	pp.pool_size = bp->rx_agg_ring_size;
	if (BNXT_RX_PAGE_MODE(bp))
		pp.pool_size += bp->rx_ring_size;
	pp.nid = dev_to_node(&bp->pdev->dev);
	pp.napi = &rxr->bnapi->napi;
	pp.dev = &bp->pdev->dev;
	pp.dma_dir = DMA_BIDIRECTIONAL;
	pp.dma_dir = bp->rx_dir;
	pp.max_len = PAGE_SIZE;
	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
		pp.flags |= PP_FLAG_PAGE_FRAG;

@@ -9448,10 +9393,16 @@ static void bnxt_disable_napi(struct bnxt *bp)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;

		napi_disable(&bp->bnapi[i]->napi);
		if (bp->bnapi[i]->rx_ring)
		cpr = &bnapi->cp_ring;
		if (bnapi->tx_fault)
			cpr->sw_stats.tx.tx_resets++;
		if (bnapi->in_reset)
			cpr->sw_stats.rx.rx_resets++;
		napi_disable(&bnapi->napi);
		if (bnapi->rx_ring)
			cancel_work_sync(&cpr->dim.work);
	}
}
@@ -9468,8 +9419,6 @@ static void bnxt_enable_napi(struct bnxt *bp)
		bnapi->tx_fault = 0;

		cpr = &bnapi->cp_ring;
		if (bnapi->in_reset)
			cpr->sw_stats.rx.rx_resets++;
		bnapi->in_reset = false;

		bnapi->tx_pkts = 0;
@@ -10738,8 +10687,10 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
	bnxt_free_skbs(bp);

	/* Save ring stats before shutdown */
	if (bp->bnapi && irq_re_init)
	if (bp->bnapi && irq_re_init) {
		bnxt_get_ring_stats(bp, &bp->net_stats_prev);
		bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
	}
	if (irq_re_init) {
		bnxt_free_irq(bp);
		bnxt_del_napi(bp);
@@ -10988,6 +10939,35 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
	clear_bit(BNXT_STATE_READ_STATS, &bp->state);
}

/* Fold one completion ring's error counters into the running totals in
 * @stats.  The software-maintained counters come from cpr->sw_stats; the
 * per-ring discard counts are read from cpr->stats.sw_stats via
 * BNXT_GET_RING_STATS64().
 */
static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
					struct bnxt_total_ring_err_stats *stats,
					struct bnxt_cp_ring_info *cpr)
{
	u64 *hw = cpr->stats.sw_stats;
	struct bnxt_sw_stats *sw = &cpr->sw_stats;

	/* RX-side software counters */
	stats->rx_total_l4_csum_errors += sw->rx.rx_l4_csum_errors;
	stats->rx_total_resets += sw->rx.rx_resets;
	stats->rx_total_buf_errors += sw->rx.rx_buf_errors;
	stats->rx_total_oom_discards += sw->rx.rx_oom_discards;
	stats->rx_total_netpoll_discards += sw->rx.rx_netpoll_discards;

	/* TX-side software counter */
	stats->tx_total_resets += sw->tx.tx_resets;

	/* Ring discard counts from the stats block */
	stats->rx_total_ring_discards +=
		BNXT_GET_RING_STATS64(hw, rx_discard_pkts);
	stats->tx_total_ring_discards +=
		BNXT_GET_RING_STATS64(hw, tx_discard_pkts);

	stats->total_missed_irqs += sw->cmn.missed_irqs;
}

void bnxt_get_ring_err_stats(struct bnxt *bp,
			     struct bnxt_total_ring_err_stats *stats)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++)
		bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
}

static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
{
	struct net_device *dev = bp->dev;
+21 −3
Original line number Diff line number Diff line
@@ -919,9 +919,6 @@ struct bnxt_rx_ring_info {
	unsigned long		*rx_agg_bmap;
	u16			rx_agg_bmap_size;

	struct page		*rx_page;
	unsigned int		rx_page_offset;

	dma_addr_t		rx_desc_mapping[MAX_RX_PAGES];
	dma_addr_t		rx_agg_desc_mapping[MAX_RX_AGG_PAGES];

@@ -942,15 +939,32 @@ struct bnxt_rx_sw_stats {
	u64			rx_netpoll_discards;
};

/* Driver-maintained TX counters, kept per completion ring. */
struct bnxt_tx_sw_stats {
	u64			tx_resets;	/* incremented in bnxt_disable_napi() when bnapi->tx_fault is set */
};

/* Driver-maintained counters that are neither RX- nor TX-specific. */
struct bnxt_cmn_sw_stats {
	u64			missed_irqs;	/* summed into total_missed_irqs for ethtool reporting */
};

/* Per-completion-ring software statistics (embedded in
 * struct bnxt_cp_ring_info as sw_stats).
 */
struct bnxt_sw_stats {
	struct bnxt_rx_sw_stats rx;	/* RX-path counters */
	struct bnxt_tx_sw_stats tx;	/* TX-path counters */
	struct bnxt_cmn_sw_stats cmn;	/* direction-independent counters */
};

/* Totals of the per-ring error counters, summed over all completion rings
 * by bnxt_get_ring_err_stats().  A snapshot is saved in
 * bp->ring_err_stats_prev across a reset so the counters survive it.
 *
 * NOTE: bnxt_get_ethtool_stats() walks these fields as a flat array of
 * u64 starting at rx_total_l4_csum_errors, so the field order must stay
 * in sync with bnxt_ring_err_stats_arr[].
 */
struct bnxt_total_ring_err_stats {
	u64			rx_total_l4_csum_errors;
	u64			rx_total_resets;
	u64			rx_total_buf_errors;
	u64			rx_total_oom_discards;
	u64			rx_total_netpoll_discards;
	u64			rx_total_ring_discards;
	u64			tx_total_resets;
	u64			tx_total_ring_discards;
	u64			total_missed_irqs;
};

struct bnxt_stats_mem {
	u64		*sw_stats;
	u64		*hw_masks;
@@ -2021,6 +2035,8 @@ struct bnxt {
	u8			pri2cos_idx[8];
	u8			pri2cos_valid;

	struct bnxt_total_ring_err_stats ring_err_stats_prev;

	u16			hwrm_max_req_len;
	u16			hwrm_max_ext_req_len;
	unsigned int		hwrm_cmd_timeout;
@@ -2347,6 +2363,8 @@ int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
void bnxt_reenable_sriov(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
void bnxt_get_ring_err_stats(struct bnxt *bp,
			     struct bnxt_total_ring_err_stats *stats);
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
			 u32 *reg_buf);
void bnxt_fw_exception(struct bnxt *bp);
+24 −25
Original line number Diff line number Diff line
@@ -339,13 +339,16 @@ enum {
	RX_NETPOLL_DISCARDS,
};

static struct {
	u64			counter;
	char			string[ETH_GSTRING_LEN];
} bnxt_sw_func_stats[] = {
	{0, "rx_total_discard_pkts"},
	{0, "tx_total_discard_pkts"},
	{0, "rx_total_netpoll_discards"},
/* ethtool string names for the ring error counters.  The order must
 * match the u64 fields of struct bnxt_total_ring_err_stats, which
 * bnxt_get_ethtool_stats() walks as a flat array of counters.
 */
static const char *const bnxt_ring_err_stats_arr[] = {
	"rx_total_l4_csum_errors",
	"rx_total_resets",
	"rx_total_buf_errors",
	"rx_total_oom_discards",
	"rx_total_netpoll_discards",
	"rx_total_ring_discards",
	"tx_total_resets",
	"tx_total_ring_discards",
	"total_missed_irqs",
};

#define NUM_RING_RX_SW_STATS		ARRAY_SIZE(bnxt_rx_sw_stats_str)
@@ -495,7 +498,7 @@ static const struct {
	BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};

#define BNXT_NUM_SW_FUNC_STATS	ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_RING_ERR_STATS	ARRAY_SIZE(bnxt_ring_err_stats_arr)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI			\
	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +	\
@@ -532,7 +535,7 @@ static int bnxt_get_num_stats(struct bnxt *bp)
{
	int num_stats = bnxt_get_num_ring_stats(bp);

	num_stats += BNXT_NUM_SW_FUNC_STATS;
	num_stats += BNXT_NUM_RING_ERR_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS)
		num_stats += BNXT_NUM_PORT_STATS;
@@ -583,18 +586,17 @@ static bool is_tx_ring(struct bnxt *bp, int ring_num)
static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	u32 i, j = 0;
	struct bnxt_total_ring_err_stats ring_err_stats = {0};
	struct bnxt *bp = netdev_priv(dev);
	u64 *curr, *prev;
	u32 tpa_stats;
	u32 i, j = 0;

	if (!bp->bnapi) {
		j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
		j += bnxt_get_num_ring_stats(bp);
		goto skip_ring_stats;
	}

	for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
		bnxt_sw_func_stats[i].counter = 0;

	tpa_stats = bnxt_get_num_tpa_ring_stats(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
@@ -631,19 +633,16 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
		sw = (u64 *)&cpr->sw_stats.cmn;
		for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
			buf[j] = sw[k];

		bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
			BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts);
		bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
			BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts);
		bnxt_sw_func_stats[RX_NETPOLL_DISCARDS].counter +=
			cpr->sw_stats.rx.rx_netpoll_discards;
	}

	for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
		buf[j] = bnxt_sw_func_stats[i].counter;
	bnxt_get_ring_err_stats(bp, &ring_err_stats);

skip_ring_stats:
	curr = &ring_err_stats.rx_total_l4_csum_errors;
	prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors;
	for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++)
		buf[j] = *curr + *prev;

	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		u64 *port_stats = bp->port_stats.sw_stats;

@@ -745,8 +744,8 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
				buf += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
			strcpy(buf, bnxt_sw_func_stats[i].string);
		for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++) {
			strscpy(buf, bnxt_ring_err_stats_arr[i], ETH_GSTRING_LEN);
			buf += ETH_GSTRING_LEN;
		}