Commit 244fd698 authored by Maor Dickman, committed by Jakub Kicinski
Browse files

net/mlx5e: TC, Extract indr setup block checks to function



In preparation for next patch which will add new check
if device block can be setup, extract all existing checks
to function to make it more readable and maintainable.

Signed-off-by: Maor Dickman <maord@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Link: https://lore.kernel.org/r/20230314054234.267365-14-saeed@kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 8a0594c0
Loading
Loading
Loading
Loading
+36 −22
Original line number Diff line number Diff line
@@ -426,39 +426,53 @@ static bool mlx5e_rep_macvlan_mode_supported(const struct net_device *dev)
	return macvlan->mode == MACVLAN_MODE_PASSTHRU;
}

static int
mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
			   struct mlx5e_rep_priv *rpriv,
			   struct flow_block_offload *f,
			   flow_setup_cb_t *setup_cb,
			   void *data,
			   void (*cleanup)(struct flow_block_cb *block_cb))
static bool
mlx5e_rep_check_indr_block_supported(struct mlx5e_rep_priv *rpriv,
				     struct net_device *netdev,
				     struct flow_block_offload *f)
{
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	bool is_ovs_int_port = netif_is_ovs_master(netdev);
	struct mlx5e_rep_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
	    !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev) &&
	    !is_ovs_int_port) {
		if (!(netif_is_macvlan(netdev) && macvlan_dev_real_dev(netdev) == rpriv->netdev))
			return -EOPNOTSUPP;
	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		return false;

	if (mlx5e_tc_tun_device_to_offload(priv, netdev))
		return true;

	if (is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev)
		return true;

	if (netif_is_macvlan(netdev)) {
		if (!mlx5e_rep_macvlan_mode_supported(netdev)) {
			netdev_warn(netdev, "Offloading ingress filter is supported only with macvlan passthru mode");
			return -EOPNOTSUPP;
			return false;
		}

		if (macvlan_dev_real_dev(netdev) == rpriv->netdev)
			return true;
	}

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		return -EOPNOTSUPP;
	if (netif_is_ovs_master(netdev) && f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
	    mlx5e_tc_int_port_supported(esw))
		return true;

	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && !is_ovs_int_port)
		return -EOPNOTSUPP;
	return false;
}

static int
mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
			   struct mlx5e_rep_priv *rpriv,
			   struct flow_block_offload *f,
			   flow_setup_cb_t *setup_cb,
			   void *data,
			   void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct mlx5e_rep_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (is_ovs_int_port && !mlx5e_tc_int_port_supported(esw))
	if (!mlx5e_rep_check_indr_block_supported(rpriv, netdev, f))
		return -EOPNOTSUPP;

	f->unlocked_driver_cb = true;