Commit abd5ac18 authored by Jakub Kicinski

Merge tag 'mlx5-fixes-2022-11-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-11-09

This series provides bug fixes to the mlx5 driver.

* tag 'mlx5-fixes-2022-11-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: TC, Fix slab-out-of-bounds in parse_tc_actions
  net/mlx5e: E-Switch, Fix comparing termination table instance
  net/mlx5e: TC, Fix wrong rejection of packet-per-second policing
  net/mlx5e: Fix tc acts array not to be dependent on enum order
  net/mlx5e: Fix usage of DMA sync API
  net/mlx5e: Add missing sanity checks for max TX WQE size
  net/mlx5: fw_reset: Don't try to load device in case PCI isn't working
  net/mlx5: E-switch, Set to legacy mode if failed to change switchdev mode
  net/mlx5: Allow async trigger completion execution on single CPU systems
  net/mlx5: Bridge, verify LAG state when adding bond to bridge
====================

Link: https://lore.kernel.org/r/20221109184050.108379-1-saeed@kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents b3bbeba0 7f1a6d4b
+8 −3
@@ -1770,12 +1770,17 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
 	struct mlx5_cmd *cmd = &dev->cmd;
 	int i;
 
-	for (i = 0; i < cmd->max_reg_cmds; i++)
-		while (down_trylock(&cmd->sem))
+	for (i = 0; i < cmd->max_reg_cmds; i++) {
+		while (down_trylock(&cmd->sem)) {
 			mlx5_cmd_trigger_completions(dev);
+			cond_resched();
+		}
+	}
 
-	while (down_trylock(&cmd->pages_sem))
+	while (down_trylock(&cmd->pages_sem)) {
 		mlx5_cmd_trigger_completions(dev);
+		cond_resched();
+	}
 
 	/* Unlock cmdif */
 	up(&cmd->pages_sem);
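Note: this hunk belongs to "net/mlx5: Allow async trigger completion execution on single CPU systems". mlx5_cmd_flush() busy-waits on the command semaphores while forcing completions; on a single-CPU system that tight loop can keep the completion context from ever running, hence the added cond_resched(). A rough illustration of the resulting pattern, written as a hypothetical helper that is not part of the driver:

/* Hypothetical helper, illustration only: drain a command semaphore by
 * forcing completions and yielding after every failed trylock, so the
 * completion work can make progress even when only one CPU is available.
 */
static void drain_cmd_sem(struct mlx5_core_dev *dev, struct semaphore *sem)
{
	while (down_trylock(sem)) {
		mlx5_cmd_trigger_completions(dev);
		cond_resched();
	}
}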
+31 −0
@@ -164,6 +164,36 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
 	return err;
 }
 
+static int
+mlx5_esw_bridge_changeupper_validate_netdev(void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct netdev_notifier_changeupper_info *info = ptr;
+	struct net_device *upper = info->upper_dev;
+	struct net_device *lower;
+	struct list_head *iter;
+
+	if (!netif_is_bridge_master(upper) || !netif_is_lag_master(dev))
+		return 0;
+
+	netdev_for_each_lower_dev(dev, lower, iter) {
+		struct mlx5_core_dev *mdev;
+		struct mlx5e_priv *priv;
+
+		if (!mlx5e_eswitch_rep(lower))
+			continue;
+
+		priv = netdev_priv(lower);
+		mdev = priv->mdev;
+		if (!mlx5_lag_is_active(mdev))
+			return -EAGAIN;
+		if (!mlx5_lag_is_shared_fdb(mdev))
+			return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
 static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
 						unsigned long event, void *ptr)
 {
@@ -171,6 +201,7 @@ static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
 
 	switch (event) {
 	case NETDEV_PRECHANGEUPPER:
+		err = mlx5_esw_bridge_changeupper_validate_netdev(ptr);
 		break;
 
 	case NETDEV_CHANGEUPPER:
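Note: the new helper comes from "net/mlx5: Bridge, verify LAG state when adding bond to bridge". Before a bond is attached to a bridge, every mlx5 representor under the bond must already be part of an active, shared-FDB LAG; otherwise the change is rejected with -EAGAIN or -EOPNOTSUPP from the NETDEV_PRECHANGEUPPER path. A hedged sketch of how such a veto typically propagates (example_prechangeupper() is hypothetical, not the driver's actual handler):

/* Illustration only: a PRECHANGEUPPER handler vetoes the upper-device change
 * by converting a negative errno into a notifier return value.
 */
static int example_prechangeupper(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	int err = 0;

	if (event == NETDEV_PRECHANGEUPPER)
		err = mlx5_esw_bridge_changeupper_validate_netdev(ptr);

	return notifier_from_errno(err);
}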
+32 −60
@@ -6,70 +6,42 @@
 #include "en/tc_priv.h"
 #include "mlx5_core.h"
 
-/* Must be aligned with enum flow_action_id. */
 static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = {
-	&mlx5e_tc_act_accept,
-	&mlx5e_tc_act_drop,
-	&mlx5e_tc_act_trap,
-	&mlx5e_tc_act_goto,
-	&mlx5e_tc_act_mirred,
-	&mlx5e_tc_act_mirred,
-	&mlx5e_tc_act_redirect_ingress,
-	NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
-	&mlx5e_tc_act_vlan,
-	&mlx5e_tc_act_vlan,
-	&mlx5e_tc_act_vlan_mangle,
-	&mlx5e_tc_act_tun_encap,
-	&mlx5e_tc_act_tun_decap,
-	&mlx5e_tc_act_pedit,
-	&mlx5e_tc_act_pedit,
-	&mlx5e_tc_act_csum,
-	NULL, /* FLOW_ACTION_MARK, */
-	&mlx5e_tc_act_ptype,
-	NULL, /* FLOW_ACTION_PRIORITY, */
-	NULL, /* FLOW_ACTION_WAKE, */
-	NULL, /* FLOW_ACTION_QUEUE, */
-	&mlx5e_tc_act_sample,
-	&mlx5e_tc_act_police,
-	&mlx5e_tc_act_ct,
-	NULL, /* FLOW_ACTION_CT_METADATA, */
-	&mlx5e_tc_act_mpls_push,
-	&mlx5e_tc_act_mpls_pop,
-	NULL, /* FLOW_ACTION_MPLS_MANGLE, */
-	NULL, /* FLOW_ACTION_GATE, */
-	NULL, /* FLOW_ACTION_PPPOE_PUSH, */
-	NULL, /* FLOW_ACTION_JUMP, */
-	NULL, /* FLOW_ACTION_PIPE, */
-	&mlx5e_tc_act_vlan,
-	&mlx5e_tc_act_vlan,
+	[FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept,
+	[FLOW_ACTION_DROP] = &mlx5e_tc_act_drop,
+	[FLOW_ACTION_TRAP] = &mlx5e_tc_act_trap,
+	[FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto,
+	[FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred,
+	[FLOW_ACTION_MIRRED] = &mlx5e_tc_act_mirred,
+	[FLOW_ACTION_REDIRECT_INGRESS] = &mlx5e_tc_act_redirect_ingress,
+	[FLOW_ACTION_VLAN_PUSH] = &mlx5e_tc_act_vlan,
+	[FLOW_ACTION_VLAN_POP] = &mlx5e_tc_act_vlan,
+	[FLOW_ACTION_VLAN_MANGLE] = &mlx5e_tc_act_vlan_mangle,
+	[FLOW_ACTION_TUNNEL_ENCAP] = &mlx5e_tc_act_tun_encap,
+	[FLOW_ACTION_TUNNEL_DECAP] = &mlx5e_tc_act_tun_decap,
+	[FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit,
+	[FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit,
+	[FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum,
+	[FLOW_ACTION_PTYPE] = &mlx5e_tc_act_ptype,
+	[FLOW_ACTION_SAMPLE] = &mlx5e_tc_act_sample,
+	[FLOW_ACTION_POLICE] = &mlx5e_tc_act_police,
+	[FLOW_ACTION_CT] = &mlx5e_tc_act_ct,
+	[FLOW_ACTION_MPLS_PUSH] = &mlx5e_tc_act_mpls_push,
+	[FLOW_ACTION_MPLS_POP] = &mlx5e_tc_act_mpls_pop,
+	[FLOW_ACTION_VLAN_PUSH_ETH] = &mlx5e_tc_act_vlan,
+	[FLOW_ACTION_VLAN_POP_ETH] = &mlx5e_tc_act_vlan,
 };
 
-/* Must be aligned with enum flow_action_id. */
 static struct mlx5e_tc_act *tc_acts_nic[NUM_FLOW_ACTIONS] = {
-	&mlx5e_tc_act_accept,
-	&mlx5e_tc_act_drop,
-	NULL, /* FLOW_ACTION_TRAP, */
-	&mlx5e_tc_act_goto,
-	&mlx5e_tc_act_mirred_nic,
-	NULL, /* FLOW_ACTION_MIRRED, */
-	NULL, /* FLOW_ACTION_REDIRECT_INGRESS, */
-	NULL, /* FLOW_ACTION_MIRRED_INGRESS, */
-	NULL, /* FLOW_ACTION_VLAN_PUSH, */
-	NULL, /* FLOW_ACTION_VLAN_POP, */
-	NULL, /* FLOW_ACTION_VLAN_MANGLE, */
-	NULL, /* FLOW_ACTION_TUNNEL_ENCAP, */
-	NULL, /* FLOW_ACTION_TUNNEL_DECAP, */
-	&mlx5e_tc_act_pedit,
-	&mlx5e_tc_act_pedit,
-	&mlx5e_tc_act_csum,
-	&mlx5e_tc_act_mark,
-	NULL, /* FLOW_ACTION_PTYPE, */
-	NULL, /* FLOW_ACTION_PRIORITY, */
-	NULL, /* FLOW_ACTION_WAKE, */
-	NULL, /* FLOW_ACTION_QUEUE, */
-	NULL, /* FLOW_ACTION_SAMPLE, */
-	NULL, /* FLOW_ACTION_POLICE, */
-	&mlx5e_tc_act_ct,
+	[FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept,
+	[FLOW_ACTION_DROP] = &mlx5e_tc_act_drop,
+	[FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto,
+	[FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred_nic,
+	[FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit,
+	[FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit,
+	[FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum,
+	[FLOW_ACTION_MARK] = &mlx5e_tc_act_mark,
+	[FLOW_ACTION_CT] = &mlx5e_tc_act_ct,
 };

/**
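Note: this hunk is "net/mlx5e: Fix tc acts array not to be dependent on enum order". With positional initializers the arrays had to mirror enum flow_action_id exactly, so inserting or reordering enum values silently shifted every later handler. Designated initializers pin each handler to its enum value. A minimal, made-up example of the failure mode (the enum and array names below are hypothetical, not driver code):

/* Illustration only: once EXAMPLE_ACT_TRAP is inserted into the enum,
 * positional initializers map it to the drop handler and leave
 * EXAMPLE_ACT_DROP as NULL, while designated initializers stay correct.
 */
enum example_action {
	EXAMPLE_ACT_ACCEPT,
	EXAMPLE_ACT_TRAP,	/* newly inserted value */
	EXAMPLE_ACT_DROP,
	NUM_EXAMPLE_ACTS,
};

static struct mlx5e_tc_act *bad_acts[NUM_EXAMPLE_ACTS] = {
	&mlx5e_tc_act_accept,
	&mlx5e_tc_act_drop,	/* now lands on EXAMPLE_ACT_TRAP */
};

static struct mlx5e_tc_act *good_acts[NUM_EXAMPLE_ACTS] = {
	[EXAMPLE_ACT_ACCEPT] = &mlx5e_tc_act_accept,
	[EXAMPLE_ACT_DROP] = &mlx5e_tc_act_drop,
};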
+23 −1
@@ -11,6 +11,27 @@
 
 #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
 
+/* IPSEC inline data includes:
+ * 1. ESP trailer: up to 255 bytes of padding, 1 byte for pad length, 1 byte for
+ *    next header.
+ * 2. ESP authentication data: 16 bytes for ICV.
+ */
+#define MLX5E_MAX_TX_IPSEC_DS DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + \
+					   255 + 1 + 1 + 16, MLX5_SEND_WQE_DS)
+
+/* 366 should be big enough to cover all L2, L3 and L4 headers with possible
+ * encapsulations.
+ */
+#define MLX5E_MAX_TX_INLINE_DS DIV_ROUND_UP(366 - INL_HDR_START_SZ + VLAN_HLEN, \
+					    MLX5_SEND_WQE_DS)
+
+/* Sync the calculation with mlx5e_sq_calc_wqe_attr. */
+#define MLX5E_MAX_TX_WQEBBS DIV_ROUND_UP(MLX5E_TX_WQE_EMPTY_DS_COUNT + \
+					 MLX5E_MAX_TX_INLINE_DS + \
+					 MLX5E_MAX_TX_IPSEC_DS + \
+					 MAX_SKB_FRAGS + 1, \
+					 MLX5_SEND_WQEBB_NUM_DS)
+
 #define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
 
 static inline
@@ -424,6 +445,8 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
 
 static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
 {
+	WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < mlx5e_get_max_sq_wqebbs(mdev));
+
 	/* A WQE must not cross the page boundary, hence two conditions:
	 * 1. Its size must not exceed the page size.
	 * 2. If the WQE size is X, and the space remaining in a page is less
@@ -436,7 +459,6 @@ static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
 		  "wqe_size %u is greater than max SQ WQEBBs %u",
 		  wqe_size, mlx5e_get_max_sq_wqebbs(mdev));
 
-
 	return MLX5E_STOP_ROOM(wqe_size);
 }
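Note: the new defines implement "net/mlx5e: Add missing sanity checks for max TX WQE size": the worst-case WQE is bounded by the empty-WQE segments, the maximal inline headers, the worst-case IPsec inline data and one data segment per fragment, and the new WARN_ON_ONCE() in mlx5e_stop_room_for_wqe() checks that the device's maximum WQE still fits in one page. A back-of-the-envelope evaluation, with constants assumed from typical kernel headers rather than taken from this diff:

/* Assumed for illustration only: MLX5_SEND_WQE_DS == 16 bytes,
 * sizeof(struct mlx5_wqe_inline_seg) == 4, INL_HDR_START_SZ == 2,
 * VLAN_HLEN == 4.
 *
 *   MLX5E_MAX_TX_IPSEC_DS  = DIV_ROUND_UP(4 + 255 + 1 + 1 + 16, 16) = 18
 *   MLX5E_MAX_TX_INLINE_DS = DIV_ROUND_UP(366 - 2 + 4, 16)          = 23
 *
 * Both feed MLX5E_MAX_TX_WQEBBS together with MLX5E_TX_WQE_EMPTY_DS_COUNT and
 * MAX_SKB_FRAGS + 1 data segments, divided by MLX5_SEND_WQEBB_NUM_DS.
 */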

+2 −2
@@ -117,7 +117,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 	xdpi.page.rq = rq;
 
 	dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
-	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE);
+	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_BIDIRECTIONAL);
 
 	if (unlikely(xdp_frame_has_frags(xdpf))) {
 		sinfo = xdp_get_shared_info_from_frame(xdpf);
@@ -131,7 +131,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 				skb_frag_off(frag);
 			len = skb_frag_size(frag);
 			dma_sync_single_for_device(sq->pdev, addr, len,
-						   DMA_TO_DEVICE);
+						   DMA_BIDIRECTIONAL);
 		}
 	}
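Note: the two DMA hunks are part of "net/mlx5e: Fix usage of DMA sync API". The direction passed to dma_sync_single_for_device() has to match the direction the buffer was mapped with; with XDP enabled these page-pool pages are mapped DMA_BIDIRECTIONAL (the NIC writes the received frame and may read it back for transmit), so syncing with DMA_TO_DEVICE was a mismatch. A hedged sketch of the intended pattern (example_sync_frame_for_tx() is hypothetical, not driver code):

/* Illustration only: sync a page-pool page for device access before XDP
 * transmit, using the same direction the page was mapped with.
 */
static void example_sync_frame_for_tx(struct device *dev, struct page *page,
				      u32 offset, u32 len)
{
	dma_addr_t addr = page_pool_get_dma_addr(page) + offset;

	dma_sync_single_for_device(dev, addr, len, DMA_BIDIRECTIONAL);
}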
