Commit ff254dad authored by David S. Miller
Browse files

Merge tag 'mlx5-updates-2021-04-19' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux



Saeed Mahameed says:

====================
mlx5-updates-2021-04-19

This patchset provides some updates to mlx5e and mlx5 SW steering drivers:

1) Tariq and Vladyslav each provide some trivial updates to the mlx5e netdev.

The next 12 patches in the patchset are focused toward mlx5 SW steering:
2) 3 trivial cleanup patches

3) Dynamic Flex parser support:
   Flex parser is a HW parser that can support protocols that are not
   natively supported by the HCA, such as Geneve (TLV options) and GTP-U.
   There are 8 such parsers, and each of them can be assigned to parse a
   specific set of protocols.

4) Enable matching on Geneve TLV options

5) Use Flex parser for MPLS over UDP/GRE

6) Enable matching on tunnel GTP-U and GTP-U first extension
   header using the dynamic Flex parser

7) Improved QoS for SW steering internal QPair for a better insertion rate
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 316bcffe aeacb52a
Loading
Loading
Loading
Loading
+7 −2
Original line number Diff line number Diff line
@@ -55,12 +55,17 @@ void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
{
	struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);

	if (dl_port->registered)
		devlink_port_unregister(dl_port);
}

struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct devlink_port *port;

	return mlx5e_devlink_get_dl_port(priv);
	port = mlx5e_devlink_get_dl_port(priv);
	if (port->registered)
		return port;
	return NULL;
}
+59 −32
Original line number Diff line number Diff line
@@ -6,6 +6,7 @@
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"

static bool mlx5e_rx_is_xdp(struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
@@ -89,30 +90,39 @@ bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
	return !params->lro_en && linear_frag_sz <= PAGE_SIZE;
}

#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
					  MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params,
				  struct mlx5e_xsk_param *xsk)
bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
				   u8 log_stride_sz, u8 log_num_strides)
{
	u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params, xsk);
	s8 signed_log_num_strides_param;
	u8 log_num_strides;
	if (log_stride_sz + log_num_strides != MLX5_MPWRQ_LOG_WQE_SZ)
		return false;

	if (!mlx5e_rx_is_linear_skb(params, xsk))
	if (log_stride_sz < MLX5_MPWQE_LOG_STRIDE_SZ_BASE ||
	    log_stride_sz > MLX5_MPWQE_LOG_STRIDE_SZ_MAX)
		return false;

	if (order_base_2(linear_frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
	if (log_num_strides > MLX5_MPWQE_LOG_NUM_STRIDES_MAX)
		return false;

	if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
		return true;
		return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE;

	return log_num_strides >= MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
}

bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
				  struct mlx5e_params *params,
				  struct mlx5e_xsk_param *xsk)
{
	s8 log_num_strides;
	u8 log_stride_sz;

	if (!mlx5e_rx_is_linear_skb(params, xsk))
		return false;

	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
	signed_log_num_strides_param =
		(s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
	log_stride_sz = order_base_2(mlx5e_rx_get_linear_frag_sz(params, xsk));
	log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - log_stride_sz;

	return signed_log_num_strides_param >= 0;
	return mlx5e_verify_rx_mpwqe_strides(mdev, log_stride_sz, log_num_strides);
}

u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
@@ -282,7 +292,7 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
	if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
		return false;

	if (MLX5_IPSEC_DEV(mdev))
	if (mlx5_fpga_is_ipsec_device(mdev))
		return false;

	if (params->xdp_prog) {
@@ -364,7 +374,7 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
	u32 buf_size = 0;
	int i;

	if (MLX5_IPSEC_DEV(mdev))
	if (mlx5_fpga_is_ipsec_device(mdev))
		byte_count += MLX5E_METADATA_ETHER_LEN;

	if (mlx5e_rx_is_linear_skb(params, xsk)) {
@@ -461,7 +471,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
	param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}

void mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params,
			 struct mlx5e_xsk_param *xsk,
			 u16 q_counter,
@@ -472,15 +482,25 @@ void mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
	int ndsegs = 1;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: {
		u8 log_wqe_num_of_strides = mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
		u8 log_wqe_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);

		if (!mlx5e_verify_rx_mpwqe_strides(mdev, log_wqe_stride_size,
						   log_wqe_num_of_strides)) {
			mlx5_core_err(mdev,
				      "Bad RX MPWQE params: log_stride_size %u, log_num_strides %u\n",
				      log_wqe_stride_size, log_wqe_num_of_strides);
			return -EINVAL;
		}

		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk) -
			 MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
			 log_wqe_num_of_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk) -
			 MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
			 log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
		break;
	}
	default: /* MLX5_WQ_TYPE_CYCLIC */
		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
		mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
@@ -498,6 +518,8 @@ void mlx5e_build_rq_param(struct mlx5_core_dev *mdev,

	param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
	mlx5e_build_rx_cq_param(mdev, params, xsk, &param->cqp);

	return 0;
}

void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
@@ -642,14 +664,17 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
	mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}

void mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
			      struct mlx5e_params *params,
			      u16 q_counter,
			      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
	int err;

	mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
	err = mlx5e_build_rq_param(mdev, params, NULL, q_counter, &cparam->rq);
	if (err)
		return err;

	icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
	async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
@@ -658,4 +683,6 @@ void mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
	mlx5e_build_xdpsq_param(mdev, params, &cparam->xdp_sq);
	mlx5e_build_icosq_param(mdev, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_async_icosq_param(mdev, async_icosq_log_wq_sz, &cparam->async_icosq);

	return 0;
}
+11 −9
Original line number Diff line number Diff line
@@ -96,6 +96,8 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *para
void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);

bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
				   u8 log_stride_sz, u8 log_num_strides);
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
				 struct mlx5e_xsk_param *xsk);
u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
@@ -122,7 +124,7 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
/* Build queue parameters */

void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c);
void mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
			 struct mlx5e_params *params,
			 struct mlx5e_xsk_param *xsk,
			 u16 q_counter,
@@ -141,7 +143,7 @@ void mlx5e_build_tx_cq_param(struct mlx5_core_dev *mdev,
void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param);
void mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
			      struct mlx5e_params *params,
			      u16 q_counter,
			      struct mlx5e_channel_param *cparam);
+12 −3
Original line number Diff line number Diff line
@@ -2086,7 +2086,10 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
	if (!chs->c || !cparam)
		goto err_free;

	mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam);
	err = mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam);
	if (err)
		goto err_free;

	for (i = 0; i < chs->num; i++) {
		struct xsk_buff_pool *xsk_pool = NULL;

@@ -4886,6 +4889,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
			  struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct devlink_port *dl_port;
	int err;

	mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
@@ -4901,6 +4905,8 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
	if (err)
		mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);

	dl_port = mlx5e_devlink_get_dl_port(priv);
	if (dl_port->registered)
		mlx5e_health_create_reporters(priv);

	return 0;
@@ -4908,6 +4914,9 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);

	if (dl_port->registered)
		mlx5e_health_destroy_reporters(priv);
	mlx5e_tls_cleanup(priv);
	mlx5e_ipsec_cleanup(priv);
+8 −6
Original line number Diff line number Diff line
@@ -83,14 +83,16 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
	ft_attr.autogroup.max_num_groups = 1;
	tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
	if (IS_ERR(tt->termtbl)) {
		esw_warn(dev, "Failed to create termination table\n");
		esw_warn(dev, "Failed to create termination table (error %d)\n",
			 IS_ERR(tt->termtbl));
		return -EOPNOTSUPP;
	}

	tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act,
				       &tt->dest, 1);
	if (IS_ERR(tt->rule)) {
		esw_warn(dev, "Failed to create termination table rule\n");
		esw_warn(dev, "Failed to create termination table rule (error %d)\n",
			 IS_ERR(tt->rule));
		goto add_flow_err;
	}
	return 0;
@@ -140,10 +142,9 @@ mlx5_eswitch_termtbl_get_create(struct mlx5_eswitch *esw,
	memcpy(&tt->flow_act, flow_act, sizeof(*flow_act));

	err = mlx5_eswitch_termtbl_create(esw->dev, tt, flow_act);
	if (err) {
		esw_warn(esw->dev, "Failed to create termination table\n");
	if (err)
		goto tt_create_err;
	}

	hash_add(esw->offloads.termtbl_tbl, &tt->termtbl_hlist, hash_key);
tt_add_ref:
	tt->ref_count++;
@@ -282,7 +283,8 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
		tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
						     &dest[i], attr);
		if (IS_ERR(tt)) {
			esw_warn(esw->dev, "Failed to create termination table\n");
			esw_warn(esw->dev, "Failed to get termination table (error %d)\n",
				 IS_ERR(tt));
			goto revert_changes;
		}
		attr->dests[num_vport_dests].termtbl = tt;
Loading