Commit ceed40d7 authored by Jakub Kicinski

Merge tag 'mlx5-updates-2022-09-27' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2022-09-27

This is part 1 of a 4-part series to align mlx5's implementation of
XSK (AF_XDP) RX queue indexing and management with other vendors:

Maxim Says:
===========

xsk: Bug fixes for frame mapping on striding RQ

Striding RQ relies on the driver mapping RX buffers into the NIC's
virtual memory space. Currently, regardless of the XSK frame size, mlx5e
maps them using MTT, and each mapping's length is PAGE_SIZE. As a
result, the stride size used by striding RQ is also equal to PAGE_SIZE.

This decision has the following issues:

1. In XSK aligned mode with a frame size smaller than PAGE_SIZE, it's
suboptimal: using 2K strides and 2K pages allows posting half as many
WQEs (see the short worked example after this list).

2. MTT is not suitable for unaligned frames, as it requires natural
alignment in theory and at least 8-byte alignment in practice.

3. Using a mapping and stride bigger than the frame risks writing past
the end of the XSK frame when receiving packets bigger than the MTU,
which is possible in some specific configurations.
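
For a rough sense of the saving in point 1, here is an illustrative
calculation; the WQE size used below (log2 = 18, i.e. 256 KB) is an
assumption standing in for MLX5_MPWRQ_LOG_WQE_SZ, not a value taken
from this patch:

    /* Illustrative arithmetic only; 18 is an assumed stand-in for
     * MLX5_MPWRQ_LOG_WQE_SZ (a 256 KB multi-packet WQE).
     */
    enum {
        LOG_WQE_SZ = 18,
        STRIDES_4K = 1 << (LOG_WQE_SZ - 12), /* 4 KB strides: 64 per WQE  */
        STRIDES_2K = 1 << (LOG_WQE_SZ - 11), /* 2 KB strides: 128 per WQE */
    };
    /* Each XSK frame occupies one stride either way, so twice the strides
     * per WQE means half as many WQEs (and UMR WQEs) have to be posted
     * for the same number of 2 KB frames.
     */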

This series addresses issues 1 and 2 and alleviates issue 3. Where
possible, the page and stride size will match the XSK frame size (a
firmware upgrade may be needed for this to take effect with 2K frames).
Unaligned mode will use KSM instead of MTT, which allows dropping the
partial workaround [1].

[1]: https://lore.kernel.org/netdev/YufYFQ6JN91lQbso@boxer/T/
====================

Link: https://lore.kernel.org/r/20220927203611.244301-1-saeed@kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 0d5bfebf 997ce6af
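
The diff below drops the compile-time MLX5E_UMR_WQE_INLINE_SZ /
MLX5E_UMR_WQEBBS macros (and the per-SQ max_sq_wqebbs fields) and
computes the UMR WQE size at runtime from the number of pages per WQE,
which stops being a fixed constant once the page size can track the XSK
frame size. A minimal standalone sketch of that computation follows;
every constant is an assumed stand-in for the driver's real struct
sizes and macros (sizeof(struct mlx5e_umr_wqe), sizeof(struct
mlx5_mtt), MLX5_UMR_MTT_ALIGNMENT, MLX5_SEND_WQE_BB):

    /* Sketch of the runtime UMR WQE sizing added in en/params.c below. */
    #define UMR_WQE_HDR_SZ  48  /* assumed control + UMR segment size */
    #define MTT_ENTRY_SZ     8  /* assumed size of one MTT entry      */
    #define MTT_ALIGNMENT   64  /* assumed UMR MTT alignment, bytes   */
    #define SEND_WQE_BB     64  /* one WQE basic block, bytes         */

    #define ALIGN_UP(x, a)     (((x) + (a) - 1) / (a) * (a))
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned int umr_wqe_sz(unsigned int pages_per_wqe)
    {
        return UMR_WQE_HDR_SZ +
               ALIGN_UP(pages_per_wqe * MTT_ENTRY_SZ, MTT_ALIGNMENT);
    }

    static unsigned int umr_wqebbs(unsigned int pages_per_wqe)
    {
        return DIV_ROUND_UP(umr_wqe_sz(pages_per_wqe), SEND_WQE_BB);
    }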
+14 −23
@@ -107,7 +107,6 @@ struct page_pool;
 * dropped by the driver at a later stage.
 */
#define MLX5E_REQUIRED_WQE_MTTS		(MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
#define MLX5E_REQUIRED_MTTS(wqes)	(wqes * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS	\
	(ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
@@ -150,13 +149,6 @@ struct page_pool;
#define MLX5E_TX_XSK_POLL_BUDGET       64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL  500 /* msecs */

#define MLX5E_UMR_WQE_INLINE_SZ \
	(sizeof(struct mlx5e_umr_wqe) + \
	 ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
	       MLX5_UMR_MTT_ALIGNMENT))
#define MLX5E_UMR_WQEBBS \
	(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))

#define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\
	(sizeof(struct mlx5e_umr_wqe) +\
	(sizeof(struct mlx5_klm) * (sgl_len)))
@@ -174,8 +166,7 @@ struct page_pool;
	ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)

#define MLX5E_MAX_KLM_PER_WQE(mdev) \
	MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * \
		mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)))
	MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))

#define MLX5E_MSG_LEVEL			NETIF_MSG_LINK

@@ -227,13 +218,15 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 * bytes units. Driver hardens the limitation to 1KB (16
 * WQEBBs), unless firmware capability is stricter.
 */
static inline u16 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
static inline u8 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
{
	return min_t(u16, MLX5_SEND_WQE_MAX_WQEBBS,
	BUILD_BUG_ON(MLX5_SEND_WQE_MAX_WQEBBS > U8_MAX);

	return (u8)min_t(u16, MLX5_SEND_WQE_MAX_WQEBBS,
			 MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
}

static inline u8 mlx5e_get_sw_max_sq_mpw_wqebbs(u8 max_sq_wqebbs)
static inline u8 mlx5e_get_max_sq_aligned_wqebbs(struct mlx5_core_dev *mdev)
{
/* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS.
 * Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16,
@@ -242,8 +235,9 @@ static inline u8 mlx5e_get_sw_max_sq_mpw_wqebbs(u8 max_sq_wqebbs)
 * than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be
 * cache-aligned.
 */
	u8 wqebbs = min_t(u8, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
	u8 wqebbs = mlx5e_get_max_sq_wqebbs(mdev);

	wqebbs = min_t(u8, wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
#if L1_CACHE_BYTES >= 128
	wqebbs = ALIGN_DOWN(wqebbs, 2);
#endif
@@ -476,7 +470,6 @@ struct mlx5e_txqsq {
	struct work_struct         recover_work;
	struct mlx5e_ptpsq        *ptpsq;
	cqe_ts_to_ns               ptp_cyc2time;
	u16                        max_sq_wqebbs;
} ____cacheline_aligned_in_smp;

struct mlx5e_dma_info {
@@ -580,7 +573,6 @@ struct mlx5e_xdpsq {
	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
	u16                        max_sq_wqebbs;
} ____cacheline_aligned_in_smp;

struct mlx5e_ktls_resync_resp;
@@ -609,7 +601,6 @@ struct mlx5e_icosq {
	/* control path */
	struct mlx5_wq_ctrl        wq_ctrl;
	struct mlx5e_channel      *channel;
	u16                        max_sq_wqebbs;

	struct work_struct         recover_work;
} ____cacheline_aligned_in_smp;
@@ -620,14 +611,10 @@ struct mlx5e_wqe_frag_info {
	bool last_in_page;
};

struct mlx5e_umr_dma_info {
	struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
};

struct mlx5e_mpw_info {
	struct mlx5e_umr_dma_info umr;
	u16 consumed_strides;
	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
	struct mlx5e_dma_info dma_info[];
};

#define MLX5E_MAX_RX_FRAGS 4
@@ -717,6 +704,10 @@ struct mlx5e_rq {
			u8                     umr_last_bulk;
			u8                     umr_completed;
			u8                     min_wqe_bulk;
			u8                     page_shift;
			u8                     pages_per_wqe;
			u8                     umr_wqebbs;
			u8                     mtts_per_wqe;
			struct mlx5e_shampo_hd *shampo;
		} mpwqe;
	};
+75 −66
@@ -7,10 +7,15 @@
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"

static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
u16 mlx5e_mpwrq_umr_wqe_sz(u8 pages_per_wqe)
{
	return params->xdp_prog || xsk;
	return sizeof(struct mlx5e_umr_wqe) +
		ALIGN(pages_per_wqe * sizeof(struct mlx5_mtt), MLX5_UMR_MTT_ALIGNMENT);
}

u8 mlx5e_mpwrq_umr_wqebbs(u8 pages_per_wqe)
{
	return DIV_ROUND_UP(mlx5e_mpwrq_umr_wqe_sz(pages_per_wqe), MLX5_SEND_WQE_BB);
}

u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
@@ -22,7 +27,7 @@ u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
		return xsk->headroom;

	headroom = NET_IP_ALIGN;
	if (mlx5e_rx_is_xdp(params, xsk))
	if (params->xdp_prog)
		headroom += XDP_PACKET_HEADROOM;
	else
		headroom += MLX5_RX_HEADROOM;
@@ -30,67 +35,67 @@ u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
	return headroom;
}

u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
				      struct mlx5e_xsk_param *xsk)
{
	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
	u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk);

	return linear_rq_headroom + hw_mtu;
	return xsk->headroom + hw_mtu;
}

static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
				       struct mlx5e_xsk_param *xsk)
static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk)
{
	u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);

	/* AF_XDP doesn't build SKBs in place. */
	if (!xsk)
		frag_sz = MLX5_SKB_FRAG_SZ(frag_sz);
	/* SKBs built on XDP_PASS on XSK RQs don't have headroom. */
	u16 headroom = xsk ? 0 : mlx5e_get_linear_rq_headroom(params, NULL);
	u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);

	/* XDP in mlx5e doesn't support multiple packets per page. AF_XDP is a
	 * special case. It can run with frames smaller than a page, as it
	 * doesn't allocate pages dynamically. However, here we pretend that
	 * fragments are page-sized: it allows to treat XSK frames like pages
	 * by redirecting alloc and free operations to XSK rings and by using
	 * the fact there are no multiple packets per "page" (which is a frame).
	 * The latter is important, because frames may come in a random order,
	 * and we will have trouble assemblying a real page of multiple frames.
	 */
	if (mlx5e_rx_is_xdp(params, xsk))
		frag_sz = max_t(u32, frag_sz, PAGE_SIZE);
	return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
}

	/* Even if we can go with a smaller fragment size, we must not put
	 * multiple packets into a single frame.
static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5e_params *params,
					 struct mlx5e_xsk_param *xsk)
{
	/* XSK frames are mapped as individual pages, because frames may come in
	 * an arbitrary order from random locations in the UMEM.
	 */
	if (xsk)
		frag_sz = max_t(u32, frag_sz, xsk->chunk_size);
		return PAGE_SIZE;

	return frag_sz;
	/* XDP in mlx5e doesn't support multiple packets per page. */
	if (params->xdp_prog)
		return PAGE_SIZE;

	return roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
}

u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
				       struct mlx5e_xsk_param *xsk)
{
	u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);
	u32 linear_stride_sz = mlx5e_rx_get_linear_stride_sz(params, xsk);

	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_stride_sz);
}

bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	/* AF_XDP allocates SKBs on XDP_PASS - ensure they don't occupy more
	 * than one page. For this, check both with and without xsk.
	if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
		return false;

	/* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
	 * must fit into a CPU page.
	 */
	u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
				 mlx5e_rx_get_linear_frag_sz(params, NULL));
	if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
		return false;

	/* XSK frames must be big enough to hold the packet data. */
	if (xsk && mlx5e_rx_get_linear_sz_xsk(params, xsk) > xsk->chunk_size)
		return false;

	return params->packet_merge.type == MLX5E_PACKET_MERGE_NONE &&
		linear_frag_sz <= PAGE_SIZE;
	return true;
}

bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
static bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
					  u8 log_stride_sz, u8 log_num_strides)
{
	if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
@@ -119,7 +124,7 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
	if (!mlx5e_rx_is_linear_skb(params, xsk))
		return false;

	log_stride_sz = order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
	log_stride_sz = order_base_2(mlx5e_rx_get_linear_stride_sz(params, xsk));
	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;

	return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
@@ -164,7 +169,7 @@ u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
				   struct mlx5e_xsk_param *xsk)
{
	if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
		return order_base_2(mlx5e_rx_get_linear_stride_sz(params, xsk));

	return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
}
@@ -209,11 +214,11 @@ u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *par
	stop_room  = mlx5e_ktls_get_stop_room(mdev, params);
	stop_room += mlx5e_stop_room_for_max_wqe(mdev);
	if (is_mpwqe)
		/* A MPWQE can take up to the maximum-sized WQE + all the normal
		 * stop room can be taken if a new packet breaks the active
		 * MPWQE session and allocates its WQEs right away.
		/* A MPWQE can take up to the maximum cacheline-aligned WQE +
		 * all the normal stop room can be taken if a new packet breaks
		 * the active MPWQE session and allocates its WQEs right away.
		 */
		stop_room += mlx5e_stop_room_for_max_wqe(mdev);
		stop_room += mlx5e_stop_room_for_mpwqe(mdev);

	return stop_room;
}
@@ -320,22 +325,27 @@ bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
		link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
}

bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
				struct mlx5e_params *params)
int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
		return false;
		return -EOPNOTSUPP;

	if (params->xdp_prog) {
		/* XSK params are not considered here. If striding RQ is in use,
		 * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
		 * be called with the known XSK params.
		 */
		if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
			return false;
	if (params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
		return -EINVAL;

	return 0;
}

	return true;
int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk)
{
	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
		return -EOPNOTSUPP;

	if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
		return -EINVAL;

	return 0;
}

void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
@@ -356,8 +366,7 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,

void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
	params->rq_wq_type = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_CYCLIC;
}
@@ -374,7 +383,7 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
	 */
	if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
	     MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
	    mlx5e_striding_rq_possible(mdev, params) &&
	    !mlx5e_mpwrq_validate_regular(mdev, params) &&
	    (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
	     !mlx5e_rx_is_linear_skb(params, NULL)))
		MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
@@ -422,8 +431,7 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
	if (mlx5e_rx_is_linear_skb(params, xsk)) {
		int frag_stride;

		frag_stride = mlx5e_rx_get_linear_frag_sz(params, xsk);
		frag_stride = roundup_pow_of_two(frag_stride);
		frag_stride = mlx5e_rx_get_linear_stride_sz(params, xsk);

		info->arr[0].frag_size = byte_count;
		info->arr[0].frag_stride = frag_stride;
@@ -789,7 +797,8 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
	if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc));
	wqebbs = mlx5e_mpwrq_umr_wqebbs(MLX5_MPWRQ_PAGES_PER_WQE) *
		(1 << mlx5e_get_rq_log_wq_sz(rqp->rqc));

	/* If XDP program is attached, XSK may be turned on at any time without
	 * restarting the channel. ICOSQ must be big enough to fit UMR WQEs of
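
The key new helper in the file above is mlx5e_rx_get_linear_stride_sz():
XSK and XDP keep page-sized strides for now (frame-sized strides arrive
later in the series), while the plain SKB path rounds the linear SKB
size up to a power of two. A simplified, self-contained model of that
choice (the parameter names and the roundup helper are stand-ins, not
the driver's API):

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in for the kernel's roundup_pow_of_two(). */
    static uint32_t roundup_pow2(uint32_t x)
    {
        uint32_t p = 1;

        while (p < x)
            p <<= 1;
        return p;
    }

    /* Simplified model of mlx5e_rx_get_linear_stride_sz(). */
    static uint32_t linear_stride_sz(bool is_xsk, bool has_xdp_prog,
                                     uint32_t skb_linear_sz, uint32_t page_size)
    {
        /* XSK frames are mapped as individual pages: frames may arrive
         * in arbitrary order from random locations in the UMEM.
         */
        if (is_xsk)
            return page_size;

        /* XDP in mlx5e doesn't support multiple packets per page. */
        if (has_xdp_prog)
            return page_size;

        return roundup_pow2(skb_linear_sz);
    }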
+8 −7
@@ -84,6 +84,11 @@ static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile,
	return qid < params->num_channels * profile->rq_groups;
}

/* Striding RQ dynamic parameters */

u16 mlx5e_mpwrq_umr_wqe_sz(u8 pages_per_wqe);
u8 mlx5e_mpwrq_umr_wqebbs(u8 pages_per_wqe);

/* Parameter calculations */

void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
@@ -92,19 +97,15 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);

bool slow_pci_heuristic(struct mlx5_core_dev *mdev);
bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);

bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
				   u8 log_stride_sz, u8 log_num_strides);
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
				 struct mlx5e_xsk_param *xsk);
u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
			     struct mlx5e_xsk_param *xsk);
u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
				struct mlx5e_xsk_param *xsk);
bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk);
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
+7 −6
@@ -439,15 +439,16 @@ static inline u16 mlx5e_stop_room_for_max_wqe(struct mlx5_core_dev *mdev)
	return MLX5E_STOP_ROOM(mlx5e_get_max_sq_wqebbs(mdev));
}

static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
static inline u16 mlx5e_stop_room_for_mpwqe(struct mlx5_core_dev *mdev)
{
	u16 room = sq->reserved_room;
	u8 mpwqe_wqebbs = mlx5e_get_max_sq_aligned_wqebbs(mdev);

	WARN_ONCE(wqe_size > sq->max_sq_wqebbs,
		  "wqe_size %u is greater than max SQ WQEBBs %u",
		  wqe_size, sq->max_sq_wqebbs);
	return mlx5e_stop_room_for_wqe(mdev, mpwqe_wqebbs);
}

	room += MLX5E_STOP_ROOM(wqe_size);
static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
{
	u16 room = sq->reserved_room + MLX5E_STOP_ROOM(wqe_size);

	return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
}
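
The MPWQE stop room above is now derived from the cacheline-aligned
maximum WQE size (mlx5e_get_max_sq_aligned_wqebbs() in the first file)
rather than the absolute hardware maximum. A rough standalone model of
the two helpers involved; the cap, the cacheline size and the "+ 1" NOP
padding term are assumptions made for the example, not the driver's
exact formulas:

    #define SEND_WQE_MAX_WQEBBS 16  /* assumed MLX5_SEND_WQE_MAX_WQEBBS */
    #define CACHELINE_BYTES    128  /* assumed L1_CACHE_BYTES           */

    /* Model of mlx5e_get_max_sq_aligned_wqebbs(): cap one below the HW
     * maximum so a full MPWQE session can stay cache-aligned, then round
     * down to an even number of WQEBBs on 128-byte cachelines.
     */
    static unsigned int max_sq_aligned_wqebbs(unsigned int fw_max_wqebbs)
    {
        unsigned int wqebbs = fw_max_wqebbs;

        if (wqebbs > SEND_WQE_MAX_WQEBBS - 1)
            wqebbs = SEND_WQE_MAX_WQEBBS - 1;
        if (CACHELINE_BYTES >= 128)
            wqebbs &= ~1u;  /* ALIGN_DOWN(wqebbs, 2) */
        return wqebbs;
    }

    /* Model of mlx5e_stop_room_for_mpwqe(): a packet that breaks the open
     * MPWQE session may immediately need its own maximum-sized WQE on top
     * of the normal stop room (NOP padding simplified to one WQEBB here).
     */
    static unsigned int stop_room_for_mpwqe(unsigned int fw_max_wqebbs)
    {
        return max_sq_aligned_wqebbs(fw_max_wqebbs) + 1;
    }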
+1 −1
@@ -333,7 +333,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx

	mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);

	if (unlikely(mlx5e_xdp_mpqwe_is_full(session, sq->max_sq_mpw_wqebbs)))
	if (unlikely(mlx5e_xdp_mpwqe_is_full(session, sq->max_sq_mpw_wqebbs)))
		mlx5e_xdp_mpwqe_complete(sq);

	stats->xmit++;