Commit 7c6327c7 authored by Paolo Abeni
Browse files


Conflicts:

net/ax25/af_ax25.c
  d7c4c9e0 ("ax25: fix incorrect dev_tracker usage")
  d62607c3 ("net: rename reference+tracking helpers")

drivers/net/netdevsim/fib.c
  180a6a3e ("netdevsim: fib: Fix reference count leak on route deletion failure")
  012ec02a ("netdevsim: convert driver to use unlocked devlink API during init/fini")

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 6f63d044 8eaa1d11
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -92,6 +92,7 @@ struct iavf_vsi {
#define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4)
#define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4)
#define IAVF_MBPS_DIVISOR	125000 /* divisor to convert to Mbps */
#define IAVF_MBPS_QUANTA	50

#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \
					(IAVF_MAX_VF_VSI * \
@@ -433,6 +434,11 @@ struct iavf_adapter {
	/* lock to protect access to the cloud filter list */
	spinlock_t cloud_filter_list_lock;
	u16 num_cloud_filters;
	/* snapshot of "num_active_queues" before setup_tc for qdisc add
	 * is invoked. This information is useful during qdisc del flow,
	 * to restore correct number of queues
	 */
	int orig_num_active_queues;

#define IAVF_MAX_FDIR_FILTERS 128	/* max allowed Flow Director filters */
	u16 fdir_active_fltr;
+44 −2
Original line number Diff line number Diff line
@@ -3410,6 +3410,7 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 total_max_rate = 0;
	u32 tx_rate_rem = 0;
	int i, num_qps = 0;
	u64 tx_rate = 0;
	int ret = 0;
@@ -3424,12 +3425,32 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
			return -EINVAL;
		if (mqprio_qopt->min_rate[i]) {
			dev_err(&adapter->pdev->dev,
				"Invalid min tx rate (greater than 0) specified\n");
				"Invalid min tx rate (greater than 0) specified for TC%d\n",
				i);
			return -EINVAL;
		}

		/* convert to Mbps */
		tx_rate = div_u64(mqprio_qopt->max_rate[i],
				  IAVF_MBPS_DIVISOR);

		if (mqprio_qopt->max_rate[i] &&
		    tx_rate < IAVF_MBPS_QUANTA) {
			dev_err(&adapter->pdev->dev,
				"Invalid max tx rate for TC%d, minimum %dMbps\n",
				i, IAVF_MBPS_QUANTA);
			return -EINVAL;
		}

		(void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);

		if (tx_rate_rem != 0) {
			dev_err(&adapter->pdev->dev,
				"Invalid max tx rate for TC%d, not divisible by %d\n",
				i, IAVF_MBPS_QUANTA);
			return -EINVAL;
		}

		total_max_rate += tx_rate;
		num_qps += mqprio_qopt->qopt.count[i];
	}
@@ -3496,6 +3517,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
			netif_tx_disable(netdev);
			iavf_del_all_cloud_filters(adapter);
			adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
			total_qps = adapter->orig_num_active_queues;
			goto exit;
		} else {
			return -EINVAL;
@@ -3539,7 +3561,21 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
				adapter->ch_config.ch_info[i].offset = 0;
			}
		}

		/* Take snapshot of original config such as "num_active_queues"
		 * It is used later when delete ADQ flow is exercised, so that
		 * once delete ADQ flow completes, VF shall go back to its
		 * original queue configuration
		 */

		adapter->orig_num_active_queues = adapter->num_active_queues;

		/* Store queue info based on TC so that VF gets configured
		 * with correct number of queues when VF completes ADQ config
		 * flow
		 */
		adapter->ch_config.total_qps = total_qps;

		netif_tx_stop_all_queues(netdev);
		netif_tx_disable(netdev);
		adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
@@ -3556,6 +3592,12 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
		}
	}
exit:
	if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
		return 0;

	netif_set_real_num_rx_queues(netdev, total_qps);
	netif_set_real_num_tx_queues(netdev, total_qps);

	return ret;
}

+1 −1
Original line number Diff line number Diff line
@@ -433,7 +433,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
						IFF_PROMISC;
					goto out_promisc;
				}
				if (vsi->current_netdev_flags &
				if (vsi->netdev->features &
				    NETIF_F_HW_VLAN_CTAG_FILTER)
					vlan_ops->ena_rx_filtering(vsi);
			}
+1 −1
Original line number Diff line number Diff line
@@ -21,7 +21,7 @@
#define OTX2_HEAD_ROOM		OTX2_ALIGN

#define	OTX2_ETH_HLEN		(VLAN_ETH_HLEN + VLAN_HLEN)
#define	OTX2_MIN_MTU		64
#define	OTX2_MIN_MTU		60

#define OTX2_MAX_GSO_SEGS	255
#define OTX2_MAX_FRAGS_IN_SQE	9
+11 −10
Original line number Diff line number Diff line
@@ -109,7 +109,7 @@ struct page_pool;
#define MLX5E_REQUIRED_WQE_MTTS		(MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
#define MLX5E_REQUIRED_MTTS(wqes)	(wqes * MLX5E_REQUIRED_WQE_MTTS)
#define MLX5E_MAX_RQ_NUM_MTTS	\
	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
	(ALIGN_DOWN(U16_MAX, 4) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW	\
		(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
@@ -174,8 +174,8 @@ struct page_pool;
	ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)

#define MLX5E_MAX_KLM_PER_WQE(mdev) \
	MLX5E_KLM_ENTRIES_PER_WQE(mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)) \
				   << MLX5_MKEY_BSF_OCTO_SIZE)
	MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * \
		mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)))

#define MLX5E_MSG_LEVEL			NETIF_MSG_LINK

@@ -233,7 +233,7 @@ static inline u16 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
		     MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
}

static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs)
static inline u8 mlx5e_get_sw_max_sq_mpw_wqebbs(u8 max_sq_wqebbs)
{
/* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS.
 * Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16,
@@ -242,11 +242,12 @@ static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs)
 * than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be
 * cache-aligned.
 */
#if L1_CACHE_BYTES < 128
	return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
#else
	return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 2);
	u8 wqebbs = min_t(u8, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);

#if L1_CACHE_BYTES >= 128
	wqebbs = ALIGN_DOWN(wqebbs, 2);
#endif
	return wqebbs;
}

struct mlx5e_tx_wqe {
@@ -456,7 +457,7 @@ struct mlx5e_txqsq {
	struct netdev_queue       *txq;
	u32                        sqn;
	u16                        stop_room;
	u16                        max_sq_mpw_wqebbs;
	u8                         max_sq_mpw_wqebbs;
	u8                         min_inline_mode;
	struct device             *pdev;
	__be32                     mkey_be;
@@ -571,7 +572,7 @@ struct mlx5e_xdpsq {
	struct device             *pdev;
	__be32                     mkey_be;
	u16                        stop_room;
	u16                        max_sq_mpw_wqebbs;
	u8                         max_sq_mpw_wqebbs;
	u8                         min_inline_mode;
	unsigned long              state;
	unsigned int               hw_mtu;
Loading