Commit 55c4bf4d authored by Jakub Kicinski
Browse files

Merge tag 'mlx5-fixes-2022-03-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-03-09

This series provides bug fixes to mlx5 driver.

* tag 'mlx5-fixes-2022-03-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: SHAMPO, reduce TIR indication
  net/mlx5e: Lag, Only handle events from highest priority multipath entry
  net/mlx5: Fix offloading with ESWITCH_IPV4_TTL_MODIFY_ENABLE
  net/mlx5: Fix a race on command flush flow
  net/mlx5: Fix size field in bufferx_reg struct
====================

Link: https://lore.kernel.org/r/20220309201517.589132-1-saeed@kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 37c9d66c 99a2b9be
Loading
Loading
Loading
Loading
+8 −7
Original line number Diff line number Diff line
@@ -131,11 +131,8 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd)

static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	lockdep_assert_held(&cmd->alloc_lock);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
@@ -145,17 +142,21 @@ static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)

static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd *cmd = ent->cmd;
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	if (!refcount_dec_and_test(&ent->refcnt))
		return;
		goto out;

	if (ent->idx >= 0) {
		struct mlx5_cmd *cmd = ent->cmd;

		cmd_free_index(cmd, ent->idx);
		up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
	}

	cmd_free_ent(ent);
out:
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
+0 −3
Original line number Diff line number Diff line
@@ -88,9 +88,6 @@ void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder,
			 (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
		MLX5_SET(tirc, tirc, lro_timeout_period_usecs, pkt_merge_param->timeout);
		break;
	case MLX5E_PACKET_MERGE_SHAMPO:
		MLX5_SET(tirc, tirc, packet_merge_mask, MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO);
		break;
	default:
		break;
	}
+1 −2
Original line number Diff line number Diff line
@@ -3616,8 +3616,7 @@ static int set_feature_hw_gro(struct net_device *netdev, bool enable)
		goto out;
	}

	err = mlx5e_safe_switch_params(priv, &new_params,
				       mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
	err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
out:
	mutex_unlock(&priv->state_lock);
	return err;
+8 −3
Original line number Diff line number Diff line
@@ -126,6 +126,10 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
		return;
	}

	/* Handle multipath entry with lower priority value */
	if (mp->mfi && mp->mfi != fi && fi->fib_priority >= mp->mfi->fib_priority)
		return;

	/* Handle add/replace event */
	nhs = fib_info_num_path(fi);
	if (nhs == 1) {
@@ -135,12 +139,13 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
			int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);

			if (i < 0)
				i = MLX5_LAG_NORMAL_AFFINITY;
			else
				++i;
				return;

			i++;
			mlx5_lag_set_port_affinity(ldev, i);
		}

		mp->mfi = fi;
		return;
	}

+0 −3
Original line number Diff line number Diff line
@@ -121,9 +121,6 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)

u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		return 1;

	if (mlx5_chains_ignore_flow_level_supported(chains))
		return UINT_MAX;

Loading