Commit c7a21904 authored by Michal Swiatkowski, committed by Tony Nguyen
Browse files

ice: Remove xsk_buff_pool from VSI structure



Current implementation of netdev already contains xsk_buff_pools.
We no longer have to contain these structures in ice_vsi.

Refactor the code to operate on netdev-provided xsk_buff_pools.

Move scheduling napi on each queue to a separate function to
simplify setup function.

Signed-off-by: Michal Swiatkowski <michal.swiatkowski@intel.com>
Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Tested-by: Kiran Bhandare <kiranx.bhandare@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 34295a36
Loading
Loading
Loading
Loading
+3 −7
Original line number Diff line number Diff line
@@ -39,6 +39,7 @@
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>
#include <net/geneve.h>
#include <net/gre.h>
#include <net/udp_tunnel.h>
@@ -326,9 +327,6 @@ struct ice_vsi {
	struct ice_ring **xdp_rings;	 /* XDP ring array */
	u16 num_xdp_txq;		 /* Used XDP queues */
	u8 xdp_mapping_mode;		 /* ICE_MAP_MODE_[CONTIG|SCATTER] */
	struct xsk_buff_pool **xsk_pools;
	u16 num_xsk_pools_used;
	u16 num_xsk_pools;
} ____cacheline_internodealigned_in_smp;

/* struct that defines an interrupt vector */
@@ -517,17 +515,15 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
 */
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
{
	struct xsk_buff_pool **pools = ring->vsi->xsk_pools;
	u16 qid = ring->q_index;

	if (ice_ring_is_xdp(ring))
		qid -= ring->vsi->num_xdp_txq;

	if (qid >= ring->vsi->num_xsk_pools || !pools || !pools[qid] ||
	    !ice_is_xdp_ena_vsi(ring->vsi))
	if (!ice_is_xdp_ena_vsi(ring->vsi))
		return NULL;

	return pools[qid];
	return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
}

/**
+18 −10
Original line number Diff line number Diff line
@@ -2475,6 +2475,22 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
			       max_txqs);
}

/**
 * ice_vsi_rx_napi_schedule - kick NAPI on every XSK-backed RX queue of a VSI
 * @vsi: VSI whose RX rings should have NAPI scheduled
 *
 * Walks all RX rings of @vsi and schedules the owning q_vector's NAPI
 * instance for each ring that has an AF_XDP buffer pool attached.
 */
static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
{
	int q_idx;

	ice_for_each_rxq(vsi, q_idx) {
		struct ice_ring *ring = vsi->rx_rings[q_idx];

		/* only rings backed by an XSK pool need servicing */
		if (!ring->xsk_pool)
			continue;

		napi_schedule(&ring->q_vector->napi);
	}
}

/**
 * ice_xdp_setup_prog - Add or remove XDP eBPF program
 * @vsi: VSI to setup XDP for
@@ -2519,16 +2535,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
	if (if_running)
		ret = ice_up(vsi);

	if (!ret && prog && vsi->xsk_pools) {
		int i;

		ice_for_each_rxq(vsi, i) {
			struct ice_ring *rx_ring = vsi->rx_rings[i];

			if (rx_ring->xsk_pool)
				napi_schedule(&rx_ring->q_vector->napi);
		}
	}
	if (!ret && prog)
		ice_vsi_rx_napi_schedule(vsi);

	return (ret || xdp_ring_err) ? -ENOMEM : 0;
}
+9 −62
Original line number Diff line number Diff line
@@ -259,45 +259,6 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
	return err;
}

/**
 * ice_xsk_alloc_pools - allocate the VSI's array of XSK buffer pool pointers
 * @vsi: VSI to attach the pool-pointer array to
 *
 * Allocates a zeroed array of num_xsk_pools pool pointers. A no-op if the
 * array already exists.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (num_xsk_pools is
 * reset to 0 in that case).
 */
static int ice_xsk_alloc_pools(struct ice_vsi *vsi)
{
	struct xsk_buff_pool **pools;

	/* already set up by an earlier call */
	if (vsi->xsk_pools)
		return 0;

	pools = kcalloc(vsi->num_xsk_pools, sizeof(*pools), GFP_KERNEL);
	if (!pools) {
		vsi->num_xsk_pools = 0;
		return -ENOMEM;
	}

	vsi->xsk_pools = pools;
	return 0;
}

/**
 * ice_xsk_remove_pool - detach the buffer pool for a certain ring/qid
 * @vsi: VSI from which the buffer pool will be removed
 * @qid: ring/qid associated with the buffer pool
 *
 * Clears the pool pointer for @qid; once the last pool is detached the
 * whole pointer array is freed and the pool accounting is reset.
 */
static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
{
	vsi->xsk_pools[qid] = NULL;

	/* free the pointer array once no pools remain in use */
	if (--vsi->num_xsk_pools_used == 0) {
		kfree(vsi->xsk_pools);
		vsi->xsk_pools = NULL;
		vsi->num_xsk_pools = 0;
	}
}

/**
 * ice_xsk_pool_disable - disable a buffer pool region
 * @vsi: Current VSI
@@ -307,12 +268,12 @@ static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
 */
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
	if (!vsi->xsk_pools || qid >= vsi->num_xsk_pools ||
	    !vsi->xsk_pools[qid])
	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);

	if (!pool)
		return -EINVAL;

	xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR);
	ice_xsk_remove_pool(vsi, qid);
	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);

	return 0;
}
@@ -333,22 +294,11 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (!vsi->num_xsk_pools)
		vsi->num_xsk_pools = min_t(u16, vsi->num_rxq, vsi->num_txq);
	if (qid >= vsi->num_xsk_pools)
	if (qid >= vsi->netdev->real_num_rx_queues ||
	    qid >= vsi->netdev->real_num_tx_queues)
		return -EINVAL;

	err = ice_xsk_alloc_pools(vsi);
	if (err)
		return err;

	if (vsi->xsk_pools && vsi->xsk_pools[qid])
		return -EBUSY;

	vsi->xsk_pools[qid] = pool;
	vsi->num_xsk_pools_used++;

	err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back),
	err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
			       ICE_RX_DMA_ATTR);
	if (err)
		return err;
@@ -842,11 +792,8 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->xsk_pools)
		return false;

	for (i = 0; i < vsi->num_xsk_pools; i++) {
		if (vsi->xsk_pools[i])
	ice_for_each_rxq(vsi, i) {
		if (xsk_get_pool_from_qid(vsi->netdev, i))
			return true;
	}