Commit 9ba11ac0 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'mlx4-next'

Amir Vadai says:

====================
Mellanox drivers updates Feb-03-2015

This patchset introduces some small bug fixes and code cleanups in mlx4_core,
mlx4_en and mlx5_core.
I am sending it in parallel to the patchset sent by Or Gerlitz today [1] because
this is the end of the time frame for 3.20. I also checked that there are no
conflicts between those two patchsets (Or's patchset is focused on the bonding
area while this one is focused on the Mellanox drivers).

The patchset was applied on top of commit 7d37d0c1 ('net: sctp: Deletion of an
unnecessary check before the function call "kfree"')

[1] - [PATCH 00/10] Add HA and LAG support to mlx4 RoCE and SRIOV services
      http://marc.info/?l=linux-netdev&m=142297582610254&w=2


====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents ce388fff cfb53f36
Loading
Loading
Loading
Loading
+4 −0
Original line number Original line Diff line number Diff line
@@ -2202,6 +2202,10 @@ static int mlx4_en_set_features(struct net_device *netdev,
			return ret;
			return ret;
	}
	}


	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX))
		en_info(priv, "Turn %s TX vlan strip offload\n",
			(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");

	if (features & NETIF_F_LOOPBACK)
	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
	else
+7 −2
Original line number Original line Diff line number Diff line
@@ -162,6 +162,10 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
		if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
		if (mlx4_alloc_pages(priv, &ring->page_alloc[i],
				     frag_info, GFP_KERNEL | __GFP_COLD))
				     frag_info, GFP_KERNEL | __GFP_COLD))
			goto out;
			goto out;

		en_dbg(DRV, priv, "  frag %d allocator: - size:%d frags:%d\n",
		       i, ring->page_alloc[i].page_size,
		       atomic_read(&ring->page_alloc[i].page->_count));
	}
	}
	return 0;
	return 0;


@@ -1059,7 +1063,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
			(eff_mtu > buf_size + frag_sizes[i]) ?
			(eff_mtu > buf_size + frag_sizes[i]) ?
				frag_sizes[i] : eff_mtu - buf_size;
				frag_sizes[i] : eff_mtu - buf_size;
		priv->frag_info[i].frag_prefix_size = buf_size;
		priv->frag_info[i].frag_prefix_size = buf_size;
		priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i],
		priv->frag_info[i].frag_stride =
				ALIGN(priv->frag_info[i].frag_size,
				      SMP_CACHE_BYTES);
				      SMP_CACHE_BYTES);
		buf_size += priv->frag_info[i].frag_size;
		buf_size += priv->frag_info[i].frag_size;
		i++;
		i++;
+1 −1
Original line number Original line Diff line number Diff line
@@ -2169,7 +2169,7 @@ static const u8 config_dev_csum_flags[] = {
int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
			      struct mlx4_config_dev_params *params)
			      struct mlx4_config_dev_params *params)
{
{
	struct mlx4_config_dev config_dev;
	struct mlx4_config_dev config_dev = {0};
	int err;
	int err;
	u8 csum_mask;
	u8 csum_mask;


+2 −1
Original line number Original line Diff line number Diff line
@@ -251,7 +251,8 @@ static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
		if (mlx4_is_master(dev))
		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
	} else {
		mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n");
		if (cache_line_size() != 32  && cache_line_size() != 64)
			mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
	}
+5 −8
Original line number Original line Diff line number Diff line
@@ -598,14 +598,11 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
	if (err)
	if (err)
		return err;
		return err;


	mpt_entry->start       = cpu_to_be64(mr->iova);
	mpt_entry->start       = cpu_to_be64(iova);
	mpt_entry->length      = cpu_to_be64(mr->size);
	mpt_entry->length      = cpu_to_be64(size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
	mpt_entry->entity_size = cpu_to_be32(page_shift);

	mpt_entry->flags    &= ~(cpu_to_be32(MLX4_MPT_FLAG_FREE |
	mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
					   MLX4_MPT_FLAG_SW_OWNS));
					   MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->flags    &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
					   MLX4_MPT_FLAG_SW_OWNS);
	if (mr->mtt.order < 0) {
	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
		mpt_entry->mtt_addr = 0;
Loading