Commit 844f63f5 authored by David S. Miller

Merge branch 'nfp-AF_XDP-zero-copy'



Simon Horman says:

====================
Add AF_XDP zero-copy support for NFP

Niklas Söderlund says:

This series adds AF_XDP zero-copy support for the NFP driver. The series
is based on previous work done by Jakub Kicinski.

Patches 1/5 and 2/5 prepare the driver for AF_XDP support by
refactoring functions that will behave differently depending on whether
AF_XDP is active, making the driver easier to read, and by preparing
some functions for reuse outside the local file scope. Patches 3/5 and
4/5 prepare the driver for dealing with the UMEM, while finally patch
5/5 adds AF_XDP support.

Based on work by Jakub Kicinski.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 34fe804e 6402528b
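
For context on how the new support is exercised from userspace (not part of
this series): below is a minimal sketch that binds an AF_XDP socket in
zero-copy mode to queue 0 of an NFP netdev, using the xsk helpers that at the
time of this series shipped in libbpf (<bpf/xsk.h>, since moved to libxdp).
The interface name "eth0", the queue id and the ring/frame sizes are
illustrative assumptions only.

	/* Minimal AF_XDP zero-copy setup sketch (assumptions: ifname "eth0",
	 * queue 0, default ring sizes). Not part of this patch series.
	 */
	#include <stdio.h>
	#include <sys/mman.h>
	#include <linux/if_xdp.h>
	#include <bpf/xsk.h>

	#define NUM_FRAMES	4096
	#define FRAME_SIZE	XSK_UMEM__DEFAULT_FRAME_SIZE

	int main(void)
	{
		struct xsk_socket_config cfg = {
			.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
			.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
			/* fail instead of falling back to copy mode */
			.bind_flags = XDP_ZEROCOPY,
		};
		struct xsk_ring_prod fill, tx;
		struct xsk_ring_cons comp, rx;
		struct xsk_socket *xsk;
		struct xsk_umem *umem;
		void *bufs;
		int err;

		/* UMEM backing store shared between kernel and userspace */
		bufs = mmap(NULL, NUM_FRAMES * FRAME_SIZE,
			    PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (bufs == MAP_FAILED)
			return 1;

		err = xsk_umem__create(&umem, bufs, NUM_FRAMES * FRAME_SIZE,
				       &fill, &comp, NULL);
		if (err) {
			fprintf(stderr, "umem create failed: %d\n", err);
			return 1;
		}

		/* With this series, the NFP driver accepts the pool via
		 * XDP_SETUP_XSK_POOL and serves queue 0 in zero-copy mode.
		 */
		err = xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, &cfg);
		if (err) {
			fprintf(stderr, "socket create failed: %d\n", err);
			return 1;
		}

		/* ... populate the FILL ring and poll RX as usual ... */

		xsk_socket__delete(xsk);
		xsk_umem__delete(umem);
		return 0;
	}

Requesting XDP_ZEROCOPY in bind_flags makes the bind fail on drivers without
zero-copy support rather than silently falling back to copy mode, which is a
simple way to verify that the driver-side support added here is actually in
effect.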
+1 −0
@@ -31,6 +31,7 @@ nfp-objs := \
	    nfp_net_main.o \
	    nfp_net_repr.o \
	    nfp_net_sriov.o \
	    nfp_net_xsk.o \
	    nfp_netvf_main.o \
	    nfp_port.o \
	    nfp_shared_buf.o \
+45 −2
@@ -109,6 +109,7 @@ struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
struct nfp_port;
struct xsk_buff_pool;

/* Convenience macro for wrapping descriptor index on ring size */
#define D_IDX(ring, idx)	((idx) & ((ring)->cnt - 1))
@@ -170,11 +171,14 @@ struct nfp_net_tx_desc {
 * struct nfp_net_tx_buf - software TX buffer descriptor
 * @skb:	normal ring, sk_buff associated with this buffer
 * @frag:	XDP ring, page frag associated with this buffer
 * @xdp:	XSK buffer pool handle (for AF_XDP)
 * @dma_addr:	DMA mapping address of the buffer
 * @fidx:	Fragment index (-1 for the head and [0..nr_frags-1] for frags)
 * @pkt_cnt:	Number of packets to be produced out of the skb associated
 *		with this buffer (valid only on the head's buffer).
 *		Will be 1 for all non-TSO packets.
 * @is_xsk_tx:	Flag if buffer is a RX buffer after a XDP_TX action and not a
 *		buffer from the TX queue (for AF_XDP).
 * @real_len:	Number of bytes which to be produced out of the skb (valid only
 *		on the head's buffer). Equal to skb->len for non-TSO packets.
 */
@@ -182,10 +186,18 @@ struct nfp_net_tx_buf {
	union {
		struct sk_buff *skb;
		void *frag;
		struct xdp_buff *xdp;
	};
	dma_addr_t dma_addr;
	union {
		struct {
			short int fidx;
			u16 pkt_cnt;
		};
		struct {
			bool is_xsk_tx;
		};
	};
	u32 real_len;
};

@@ -314,6 +326,16 @@ struct nfp_net_rx_buf {
	dma_addr_t dma_addr;
};

/**
 * struct nfp_net_xsk_rx_buf - software RX XSK buffer descriptor
 * @dma_addr:	DMA mapping address of the buffer
 * @xdp:	XSK buffer pool handle (for AF_XDP)
 */
struct nfp_net_xsk_rx_buf {
	dma_addr_t dma_addr;
	struct xdp_buff *xdp;
};

/**
 * struct nfp_net_rx_ring - RX ring structure
 * @r_vec:      Back pointer to ring vector structure
@@ -324,6 +346,7 @@ struct nfp_net_rx_buf {
 * @fl_qcidx:   Queue Controller Peripheral (QCP) queue index for the freelist
 * @qcp_fl:     Pointer to base of the QCP freelist queue
 * @rxbufs:     Array of transmitted FL/RX buffers
 * @xsk_rxbufs: Array of transmitted FL/RX buffers (for AF_XDP)
 * @rxds:       Virtual address of FL/RX ring in host memory
 * @xdp_rxq:    RX-ring info avail for XDP
 * @dma:        DMA address of the FL/RX ring
@@ -342,6 +365,7 @@ struct nfp_net_rx_ring {
	u8 __iomem *qcp_fl;

	struct nfp_net_rx_buf *rxbufs;
	struct nfp_net_xsk_rx_buf *xsk_rxbufs;
	struct nfp_net_rx_desc *rxds;

	struct xdp_rxq_info xdp_rxq;
@@ -360,6 +384,7 @@ struct nfp_net_rx_ring {
 * @tx_ring:        Pointer to TX ring
 * @rx_ring:        Pointer to RX ring
 * @xdp_ring:	    Pointer to an extra TX ring for XDP
 * @xsk_pool:	    XSK buffer pool active on vector queue pair (for AF_XDP)
 * @irq_entry:      MSI-X table entry (use for talking to the device)
 * @event_ctr:	    Number of interrupt
 * @rx_dim:	    Dynamic interrupt moderation structure for RX
@@ -431,6 +456,7 @@ struct nfp_net_r_vector {
	u64 rx_replace_buf_alloc_fail;

	struct nfp_net_tx_ring *xdp_ring;
	struct xsk_buff_pool *xsk_pool;

	struct u64_stats_sync tx_sync;
	u64 tx_pkts;
@@ -501,6 +527,7 @@ struct nfp_stat_pair {
 * @num_stack_tx_rings:	Number of TX rings used by the stack (not XDP)
 * @num_rx_rings:	Currently configured number of RX rings
 * @mtu:		Device MTU
 * @xsk_pools:		XSK buffer pools, @max_r_vecs in size (for AF_XDP).
 */
struct nfp_net_dp {
	struct device *dev;
@@ -537,6 +564,8 @@ struct nfp_net_dp {
	unsigned int num_rx_rings;

	unsigned int mtu;

	struct xsk_buff_pool **xsk_pools;
};

/**
@@ -965,6 +994,7 @@ int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd);
void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 update);
int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn);

void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr);
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
		   unsigned int min_irqs, unsigned int want_irqs);
@@ -973,6 +1003,19 @@ void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
		    unsigned int n);

void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring);
void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget);

bool
nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
		   void *data, void *pkt, unsigned int pkt_len, int meta_len);

void nfp_net_rx_csum(const struct nfp_net_dp *dp,
		     struct nfp_net_r_vector *r_vec,
		     const struct nfp_net_rx_desc *rxd,
		     const struct nfp_meta_parsed *meta,
		     struct sk_buff *skb);

struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
			  struct netlink_ext_ack *extack);
+167 −40
@@ -38,6 +38,7 @@

#include <net/tls.h>
#include <net/vxlan.h>
#include <net/xdp_sock_drv.h>

#include "nfpcore/nfp_nsp.h"
#include "ccm.h"
@@ -45,6 +46,7 @@
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_net_sriov.h"
#include "nfp_net_xsk.h"
#include "nfp_port.h"
#include "crypto/crypto.h"
#include "crypto/fw.h"
@@ -381,7 +383,7 @@ int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
 *
 * Clear the ICR for the IRQ entry.
 */
static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
{
	nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
	nn_pci_flush(nn);
@@ -923,7 +925,7 @@ static void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
#endif
}

static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
	wmb();
	nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
@@ -1142,7 +1144,7 @@ static netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 * @tx_ring:	TX ring structure
 * @budget:	NAPI budget (only used as bool to determine if in NAPI context)
 */
static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
@@ -1315,6 +1317,9 @@ nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
		tx_ring->rd_p++;
	}

	if (tx_ring->is_xdp)
		nfp_net_xsk_tx_bufs_free(tx_ring);

	memset(tx_ring->txds, 0, tx_ring->size);
	tx_ring->wr_p = 0;
	tx_ring->rd_p = 0;
@@ -1338,24 +1343,43 @@ static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
/* Receive processing
 */
static unsigned int
nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
nfp_net_calc_fl_bufsz_data(struct nfp_net_dp *dp)
{
	unsigned int fl_bufsz;
	unsigned int fl_bufsz = 0;

	fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
	fl_bufsz += dp->rx_dma_off;
	if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
		fl_bufsz += NFP_NET_MAX_PREPEND;
	else
		fl_bufsz += dp->rx_offset;
	fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;

	return fl_bufsz;
}

static unsigned int nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
{
	unsigned int fl_bufsz;

	fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
	fl_bufsz += dp->rx_dma_off;
	fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);

	fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
	fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return fl_bufsz;
}

static unsigned int nfp_net_calc_fl_bufsz_xsk(struct nfp_net_dp *dp)
{
	unsigned int fl_bufsz;

	fl_bufsz = XDP_PACKET_HEADROOM;
	fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);

	return fl_bufsz;
}

static void
nfp_net_free_frag(void *frag, bool xdp)
{
@@ -1484,10 +1508,14 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
	/* Move the empty entry to the end of the list */
	wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
	last_idx = rx_ring->cnt - 1;
	rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
	rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
	rx_ring->rxbufs[last_idx].dma_addr = 0;
	rx_ring->rxbufs[last_idx].frag = NULL;
	if (rx_ring->r_vec->xsk_pool) {
		rx_ring->xsk_rxbufs[wr_idx] = rx_ring->xsk_rxbufs[last_idx];
		memset(&rx_ring->xsk_rxbufs[last_idx], 0,
		       sizeof(*rx_ring->xsk_rxbufs));
	} else {
		rx_ring->rxbufs[wr_idx] = rx_ring->rxbufs[last_idx];
		memset(&rx_ring->rxbufs[last_idx], 0, sizeof(*rx_ring->rxbufs));
	}

	memset(rx_ring->rxds, 0, rx_ring->size);
	rx_ring->wr_p = 0;
@@ -1509,6 +1537,9 @@ nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
{
	unsigned int i;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		return;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
		/* NULL skb can only happen when initial filling of the ring
		 * fails to allocate enough buffers and calls here to free
@@ -1536,6 +1567,9 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
	struct nfp_net_rx_buf *rxbufs;
	unsigned int i;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		return 0;

	rxbufs = rx_ring->rxbufs;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
@@ -1560,6 +1594,9 @@ nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
{
	unsigned int i;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		return nfp_net_xsk_rx_ring_fill_freelist(rx_ring);

	for (i = 0; i < rx_ring->cnt - 1; i++)
		nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
				    rx_ring->rxbufs[i].dma_addr);
@@ -1587,10 +1624,10 @@ static int nfp_net_rx_csum_has_errors(u16 flags)
 * @meta: Parsed metadata prepend
 * @skb: Pointer to SKB
 */
static void nfp_net_rx_csum(struct nfp_net_dp *dp,
void nfp_net_rx_csum(const struct nfp_net_dp *dp,
		     struct nfp_net_r_vector *r_vec,
			    struct nfp_net_rx_desc *rxd,
			    struct nfp_meta_parsed *meta, struct sk_buff *skb)
		     const struct nfp_net_rx_desc *rxd,
		     const struct nfp_meta_parsed *meta, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

@@ -1668,7 +1705,7 @@ nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
			 &rx_hash->hash);
}

static bool
bool
nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
		   void *data, void *pkt, unsigned int pkt_len, int meta_len)
{
@@ -2540,6 +2577,10 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)

	if (dp->netdev)
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx))
		kvfree(rx_ring->xsk_rxbufs);
	else
		kvfree(rx_ring->rxbufs);

	if (rx_ring->rxds)
@@ -2548,6 +2589,7 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)

	rx_ring->cnt = 0;
	rx_ring->rxbufs = NULL;
	rx_ring->xsk_rxbufs = NULL;
	rx_ring->rxds = NULL;
	rx_ring->dma = 0;
	rx_ring->size = 0;
@@ -2563,8 +2605,18 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
static int
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
	enum xdp_mem_type mem_type;
	size_t rxbuf_sw_desc_sz;
	int err;

	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
		mem_type = MEM_TYPE_XSK_BUFF_POOL;
		rxbuf_sw_desc_sz = sizeof(*rx_ring->xsk_rxbufs);
	} else {
		mem_type = MEM_TYPE_PAGE_ORDER0;
		rxbuf_sw_desc_sz = sizeof(*rx_ring->rxbufs);
	}

	if (dp->netdev) {
		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
				       rx_ring->idx, rx_ring->r_vec->napi.napi_id);
@@ -2572,6 +2624,10 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
			return err;
	}

	err = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, mem_type, NULL);
	if (err)
		goto err_alloc;

	rx_ring->cnt = dp->rxd_cnt;
	rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
	rx_ring->rxds = dma_alloc_coherent(dp->dev, rx_ring->size,
@@ -2583,10 +2639,17 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
		goto err_alloc;
	}

	rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs),
	if (nfp_net_has_xsk_pool_slow(dp, rx_ring->idx)) {
		rx_ring->xsk_rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
					       GFP_KERNEL);
		if (!rx_ring->xsk_rxbufs)
			goto err_alloc;
	} else {
		rx_ring->rxbufs = kvcalloc(rx_ring->cnt, rxbuf_sw_desc_sz,
					   GFP_KERNEL);
		if (!rx_ring->rxbufs)
			goto err_alloc;
	}

	return 0;

@@ -2638,6 +2701,27 @@ static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
	kfree(dp->rx_rings);
}

static void
nfp_net_napi_add(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, int idx)
{
	if (dp->netdev)
		netif_napi_add(dp->netdev, &r_vec->napi,
			       nfp_net_has_xsk_pool_slow(dp, idx) ?
			       nfp_net_xsk_poll : nfp_net_poll,
			       NAPI_POLL_WEIGHT);
	else
		tasklet_enable(&r_vec->tasklet);
}

static void
nfp_net_napi_del(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec)
{
	if (dp->netdev)
		netif_napi_del(&r_vec->napi);
	else
		tasklet_disable(&r_vec->tasklet);
}

static void
nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
			    struct nfp_net_r_vector *r_vec, int idx)
@@ -2648,6 +2732,17 @@ nfp_net_vector_assign_rings(struct nfp_net_dp *dp,

	r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
		&dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;

	if (nfp_net_has_xsk_pool_slow(dp, idx) || r_vec->xsk_pool) {
		r_vec->xsk_pool = dp->xdp_prog ? dp->xsk_pools[idx] : NULL;

		if (r_vec->xsk_pool)
			xsk_pool_set_rxq_info(r_vec->xsk_pool,
					      &r_vec->rx_ring->xdp_rxq);

		nfp_net_napi_del(dp, r_vec);
		nfp_net_napi_add(dp, r_vec, idx);
	}
}

static int
@@ -2656,23 +2751,14 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
{
	int err;

	/* Setup NAPI */
	if (nn->dp.netdev)
		netif_napi_add(nn->dp.netdev, &r_vec->napi,
			       nfp_net_poll, NAPI_POLL_WEIGHT);
	else
		tasklet_enable(&r_vec->tasklet);
	nfp_net_napi_add(&nn->dp, r_vec, idx);

	snprintf(r_vec->name, sizeof(r_vec->name),
		 "%s-rxtx-%d", nfp_net_name(nn), idx);
	err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
			  r_vec);
	if (err) {
		if (nn->dp.netdev)
			netif_napi_del(&r_vec->napi);
		else
			tasklet_disable(&r_vec->tasklet);

		nfp_net_napi_del(&nn->dp, r_vec);
		nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
		return err;
	}
@@ -2690,11 +2776,7 @@ static void
nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{
	irq_set_affinity_hint(r_vec->irq_vector, NULL);
	if (nn->dp.netdev)
		netif_napi_del(&r_vec->napi);
	else
		tasklet_disable(&r_vec->tasklet);

	nfp_net_napi_del(&nn->dp, r_vec);
	free_irq(r_vec->irq_vector, r_vec);
}

@@ -2808,8 +2890,11 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
	if (err)
		nn_err(nn, "Could not disable device: %d\n", err);

	for (r = 0; r < nn->dp.num_rx_rings; r++)
	for (r = 0; r < nn->dp.num_rx_rings; r++) {
		nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
		if (nfp_net_has_xsk_pool_slow(&nn->dp, nn->dp.rx_rings[r].idx))
			nfp_net_xsk_rx_bufs_free(&nn->dp.rx_rings[r]);
	}
	for (r = 0; r < nn->dp.num_tx_rings; r++)
		nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
	for (r = 0; r < nn->dp.num_r_vecs; r++)
@@ -3296,6 +3381,15 @@ struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)

	*new = nn->dp;

	new->xsk_pools = kmemdup(new->xsk_pools,
				 array_size(nn->max_r_vecs,
					    sizeof(new->xsk_pools)),
				 GFP_KERNEL);
	if (!new->xsk_pools) {
		kfree(new);
		return NULL;
	}

	/* Clear things which need to be recomputed */
	new->fl_bufsz = 0;
	new->tx_rings = NULL;
@@ -3306,10 +3400,18 @@ struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
	return new;
}

static void nfp_net_free_dp(struct nfp_net_dp *dp)
{
	kfree(dp->xsk_pools);
	kfree(dp);
}

static int
nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
		     struct netlink_ext_ack *extack)
{
	unsigned int r, xsk_min_fl_bufsz;

	/* XDP-enabled tests */
	if (!dp->xdp_prog)
		return 0;
@@ -3322,6 +3424,18 @@ nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
		return -EINVAL;
	}

	xsk_min_fl_bufsz = nfp_net_calc_fl_bufsz_xsk(dp);
	for (r = 0; r < nn->max_r_vecs; r++) {
		if (!dp->xsk_pools[r])
			continue;

		if (xsk_pool_get_rx_frame_size(dp->xsk_pools[r]) < xsk_min_fl_bufsz) {
			NL_SET_ERR_MSG_MOD(extack,
					   "XSK buffer pool chunk size too small\n");
			return -EINVAL;
		}
	}

	return 0;
}

@@ -3389,7 +3503,7 @@ int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,

	nfp_net_open_stack(nn);
exit_free_dp:
	kfree(dp);
	nfp_net_free_dp(dp);

	return err;

@@ -3398,7 +3512,7 @@ int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
err_cleanup_vecs:
	for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
	kfree(dp);
	nfp_net_free_dp(dp);
	return err;
}

@@ -3716,6 +3830,9 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
		return nfp_net_xdp_setup_drv(nn, xdp);
	case XDP_SETUP_PROG_HW:
		return nfp_net_xdp_setup_hw(nn, xdp);
	case XDP_SETUP_XSK_POOL:
		return nfp_net_xsk_setup_pool(netdev, xdp->xsk.pool,
					      xdp->xsk.queue_id);
	default:
		return nfp_app_bpf(nn->app, nn, xdp);
	}
@@ -3766,6 +3883,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
	.ndo_features_check	= nfp_net_features_check,
	.ndo_get_phys_port_name	= nfp_net_get_phys_port_name,
	.ndo_bpf		= nfp_net_xdp,
	.ndo_xsk_wakeup		= nfp_net_xsk_wakeup,
	.ndo_get_devlink_port	= nfp_devlink_get_devlink_port,
};

@@ -3893,6 +4011,14 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
	nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
	nn->dp.num_r_vecs = min_t(unsigned int,
				  nn->dp.num_r_vecs, num_online_cpus());
	nn->max_r_vecs = nn->dp.num_r_vecs;

	nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(nn->dp.xsk_pools),
				   GFP_KERNEL);
	if (!nn->dp.xsk_pools) {
		err = -ENOMEM;
		goto err_free_nn;
	}

	nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
	nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
@@ -3932,6 +4058,7 @@ void nfp_net_free(struct nfp_net *nn)
	WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
	nfp_ccm_mbox_free(nn);

	kfree(nn->dp.xsk_pools);
	if (nn->dp.netdev)
		free_netdev(nn->dp.netdev);
	else
+21 −12
@@ -42,6 +42,7 @@ static int nfp_rx_q_show(struct seq_file *file, void *data)
		seq_printf(file, "%04d: 0x%08x 0x%08x", i,
			   rxd->vals[0], rxd->vals[1]);

		if (!r_vec->xsk_pool) {
			frag = READ_ONCE(rx_ring->rxbufs[i].frag);
			if (frag)
				seq_printf(file, " frag=%p", frag);
@@ -49,6 +50,11 @@ static int nfp_rx_q_show(struct seq_file *file, void *data)
			if (rx_ring->rxbufs[i].dma_addr)
				seq_printf(file, " dma_addr=%pad",
					   &rx_ring->rxbufs[i].dma_addr);
		} else {
			if (rx_ring->xsk_rxbufs[i].dma_addr)
				seq_printf(file, " dma_addr=%pad",
					   &rx_ring->xsk_rxbufs[i].dma_addr);
		}

		if (i == rx_ring->rd_p % rxd_cnt)
			seq_puts(file, " H_RD ");
@@ -103,20 +109,23 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
		   tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);

	for (i = 0; i < txd_cnt; i++) {
		struct xdp_buff *xdp;
		struct sk_buff *skb;

		txd = &tx_ring->txds[i];
		seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i,
			   txd->vals[0], txd->vals[1],
			   txd->vals[2], txd->vals[3]);

		if (tx_ring == r_vec->tx_ring) {
			struct sk_buff *skb = READ_ONCE(tx_ring->txbufs[i].skb);

		if (!tx_ring->is_xdp) {
			skb = READ_ONCE(tx_ring->txbufs[i].skb);
			if (skb)
				seq_printf(file, " skb->head=%p skb->data=%p",
					   skb->head, skb->data);
		} else {
			seq_printf(file, " frag=%p",
				   READ_ONCE(tx_ring->txbufs[i].frag));
			xdp = READ_ONCE(tx_ring->txbufs[i].xdp);
			if (xdp)
				seq_printf(file, " xdp->data=%p", xdp->data);
		}

		if (tx_ring->txbufs[i].dma_addr)
+592 −0

File added.

Preview size limit exceeded, changes collapsed.