Commit 9a94d764 authored by David S. Miller's avatar David S. Miller
Browse files

Merge tag 'mlx5-updates-2023-06-16' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux



mlx5-updates-2023-06-16

1) Added a new event handler to firmware sync reset, which is used to
   support the firmware sync reset flow on smart NICs. Adding this new stage
   to the flow enables the firmware to ensure that host PFs unload before
   ECPFs unload, avoiding a race during PF recovery.

2) Debugfs for mlx5 eswitch bridge offloads

3) Added two new counters for vport stats

4) Minor Fixups and cleanups for net-next branch

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 2dc6af8b 5f2cf757
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -797,6 +797,16 @@ Counters on the NIC port that is connected to a eSwitch.
       RoCE/UD/RC traffic) [#accel]_.
     - Acceleration

   * - `vport_loopback_packets`
     - Unicast, multicast and broadcast packets that were loop-back (received
       and transmitted), IB/Eth  [#accel]_.
     - Acceleration

   * - `vport_loopback_bytes`
     - Unicast, multicast and broadcast bytes that were loop-back (received
       and transmitted), IB/Eth  [#accel]_.
     - Acceleration

   * - `rx_steer_missed_packets`
     - Number of packets that were received by the NIC but discarded
       because they did not match any flow in the NIC flow table.
+2 −1
Original line number Diff line number Diff line
@@ -75,7 +75,8 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
				      esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
				      esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o

mlx5_core-$(CONFIG_MLX5_BRIDGE)    += esw/bridge.o esw/bridge_mcast.o en/rep/bridge.o
mlx5_core-$(CONFIG_MLX5_BRIDGE)    += esw/bridge.o esw/bridge_mcast.o esw/bridge_debugfs.o \
				      en/rep/bridge.o

mlx5_core-$(CONFIG_THERMAL)        += thermal.o
mlx5_core-$(CONFIG_MLX5_MPFS)      += lib/mpfs.o
+0 −10
Original line number Diff line number Diff line
@@ -165,15 +165,6 @@ struct page_pool;
#define MLX5E_MAX_KLM_PER_WQE(mdev) \
	MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))

#define MLX5E_MSG_LEVEL			NETIF_MSG_LINK

#define mlx5e_dbg(mlevel, priv, format, ...)                    \
do {                                                            \
	if (NETIF_MSG_##mlevel & (priv)->msglevel)              \
		netdev_warn(priv->netdev, format,               \
			    ##__VA_ARGS__);                     \
} while (0)

#define mlx5e_state_dereference(priv, p) \
	rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))

@@ -880,7 +871,6 @@ struct mlx5e_priv {
#endif
	/* priv data path fields - end */

	u32                        msglevel;
	unsigned long              state;
	struct mutex               state_lock; /* Protects Interface state */
	struct mlx5e_rq            drop_rq;
+23 −21
Original line number Diff line number Diff line
@@ -65,7 +65,8 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
			MLX5_GET(bufferx_reg, buffer, xoff_threshold) * port_buff_cell_sz;
		total_used += port_buffer->buffer[i].size;

		mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i,
		netdev_dbg(priv->netdev, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n",
			   i,
			   port_buffer->buffer[i].size,
			   port_buffer->buffer[i].xon,
			   port_buffer->buffer[i].xoff,
@@ -87,7 +88,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
					 port_buffer->internal_buffers_size -
					 port_buffer->headroom_size;

	mlx5e_dbg(HW, priv,
	netdev_dbg(priv->netdev,
		   "total buffer size=%u, headroom buffer size=%u, internal buffers size=%u, spare buffer size=%u\n",
		   port_buffer->port_buffer_size, port_buffer->headroom_size,
		   port_buffer->internal_buffers_size,
@@ -352,7 +353,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)

	xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;

	mlx5e_dbg(HW, priv, "%s: xoff=%d\n", __func__, xoff);
	netdev_dbg(priv->netdev, "%s: xoff=%d\n", __func__, xoff);
	return xoff;
}

@@ -484,6 +485,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
				    u8 *prio2buffer)
{
	u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
	struct net_device *netdev = priv->netdev;
	struct mlx5e_port_buffer port_buffer;
	u32 xoff = calculate_xoff(priv, mtu);
	bool update_prio2buffer = false;
@@ -495,7 +497,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
	int err;
	int i;

	mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
	netdev_dbg(netdev, "%s: change=%x\n", __func__, change);
	max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);

	err = mlx5e_port_query_buffer(priv, &port_buffer);
@@ -510,7 +512,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
	}

	if (change & MLX5E_PORT_BUFFER_PFC) {
		mlx5e_dbg(HW, priv, "%s: requested PFC per priority bitmask: 0x%x\n",
		netdev_dbg(netdev, "%s: requested PFC per priority bitmask: 0x%x\n",
			   __func__, pfc->pfc_en);
		err = mlx5e_port_query_priority2buffer(priv->mdev, buffer);
		if (err)
@@ -526,7 +528,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
	if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
		update_prio2buffer = true;
		for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
			mlx5e_dbg(HW, priv, "%s: requested to map prio[%d] to buffer %d\n",
			netdev_dbg(priv->netdev, "%s: requested to map prio[%d] to buffer %d\n",
				   __func__, i, prio2buffer[i]);

		err = fill_pfc_en(priv->mdev, &curr_pfc_en);
@@ -541,9 +543,9 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,

	if (change & MLX5E_PORT_BUFFER_SIZE) {
		for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
			mlx5e_dbg(HW, priv, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]);
			netdev_dbg(priv->netdev, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]);
			if (!port_buffer.buffer[i].lossy && !buffer_size[i]) {
				mlx5e_dbg(HW, priv, "%s: lossless buffer[%d] size cannot be zero\n",
				netdev_dbg(priv->netdev, "%s: lossless buffer[%d] size cannot be zero\n",
					   __func__, i);
				return -EINVAL;
			}
@@ -552,7 +554,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
			total_used += buffer_size[i];
		}

		mlx5e_dbg(HW, priv, "%s: total buffer requested=%d\n", __func__, total_used);
		netdev_dbg(priv->netdev, "%s: total buffer requested=%d\n", __func__, total_used);

		if (total_used > port_buffer.headroom_size &&
		    (total_used - port_buffer.headroom_size) >
+4 −5
Original line number Diff line number Diff line
@@ -136,7 +136,6 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
	struct mlx5_eswitch *esw = br_offloads->esw;
	u16 vport_num, esw_owner_vhca_id;
	struct netlink_ext_ack *extack;
	int ifindex = upper->ifindex;
	int err = 0;

	if (!netif_is_bridge_master(upper))
@@ -150,15 +149,15 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr

	if (mlx5_esw_bridge_is_local(dev, rep, esw))
		err = info->linking ?
			mlx5_esw_bridge_vport_link(ifindex, vport_num, esw_owner_vhca_id,
			mlx5_esw_bridge_vport_link(upper, vport_num, esw_owner_vhca_id,
						   br_offloads, extack) :
			mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id,
			mlx5_esw_bridge_vport_unlink(upper, vport_num, esw_owner_vhca_id,
						     br_offloads, extack);
	else if (mlx5_esw_bridge_dev_same_hw(rep, esw))
		err = info->linking ?
			mlx5_esw_bridge_vport_peer_link(ifindex, vport_num, esw_owner_vhca_id,
			mlx5_esw_bridge_vport_peer_link(upper, vport_num, esw_owner_vhca_id,
							br_offloads, extack) :
			mlx5_esw_bridge_vport_peer_unlink(ifindex, vport_num, esw_owner_vhca_id,
			mlx5_esw_bridge_vport_peer_unlink(upper, vport_num, esw_owner_vhca_id,
							  br_offloads, extack);

	return err;
Loading