Commit fa663c09 authored by Jian Shen, committed by David S. Miller

net: hns3: split out hclge_get_fd_rule_info()

hclge_get_fd_rule_info() is bloated, so split it into several
standalone helper functions for readability and maintainability.
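
All of the new helpers follow one pattern: copy the matched value into the
ethtool spec, and report an all-zero mask for any tuple flagged in
rule->unused_tuple. Below is a minimal stand-alone sketch of that pattern
(user-space C with simplified stand-in types, not the real driver
structures):

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

enum { INNER_SRC_PORT, INNER_DST_PORT };

/* Simplified stand-ins for hclge_fd_rule and ethtool_tcpip4_spec */
struct demo_rule {
	uint16_t src_port, dst_port;
	uint16_t src_port_mask, dst_port_mask;
	uint32_t unused_tuple;	/* bit set => tuple is not matched */
};

struct demo_port_spec {
	uint16_t psrc, pdst;
};

/* Helper in the style of hclge_fd_get_tcpip4_info(): fill the spec
 * from the rule, and zero the mask for any tuple marked unused.
 */
static void demo_get_port_info(const struct demo_rule *rule,
			       struct demo_port_spec *spec,
			       struct demo_port_spec *spec_mask)
{
	spec->psrc = rule->src_port;
	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
			0 : rule->src_port_mask;

	spec->pdst = rule->dst_port;
	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
			0 : rule->dst_port_mask;
}

int main(void)
{
	struct demo_rule rule = {
		.src_port = 80, .src_port_mask = 0xffff,
		.dst_port = 8080, .dst_port_mask = 0xffff,
		.unused_tuple = BIT(INNER_DST_PORT),	/* dst port unused */
	};
	struct demo_port_spec spec, mask;

	demo_get_port_info(&rule, &spec, &mask);
	/* prints: psrc 80/0xffff pdst 8080/0x0000 */
	printf("psrc %u/0x%04x pdst %u/0x%04x\n",
	       (unsigned)spec.psrc, (unsigned)mask.psrc,
	       (unsigned)spec.pdst, (unsigned)mask.pdst);
	return 0;
}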

Signed-off-by: Jian Shen <shenjian15@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 736fc0e1
+159 −144
@@ -5938,178 +5938,131 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
 	return 0;
 }
 
-static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
-				  struct ethtool_rxnfc *cmd)
+static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
+				     struct ethtool_tcpip4_spec *spec,
+				     struct ethtool_tcpip4_spec *spec_mask)
 {
-	struct hclge_vport *vport = hclge_get_vport(handle);
-	struct hclge_fd_rule *rule = NULL;
-	struct hclge_dev *hdev = vport->back;
-	struct ethtool_rx_flow_spec *fs;
-	struct hlist_node *node2;
-
-	if (!hnae3_dev_fd_supported(hdev))
-		return -EOPNOTSUPP;
-
-	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
-
-	spin_lock_bh(&hdev->fd_rule_lock);
-
-	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
-		if (rule->location >= fs->location)
-			break;
-	}
-
-	if (!rule || fs->location != rule->location) {
-		spin_unlock_bh(&hdev->fd_rule_lock);
-
-		return -ENOENT;
-	}
-
-	fs->flow_type = rule->flow_type;
-	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
-	case SCTP_V4_FLOW:
-	case TCP_V4_FLOW:
-	case UDP_V4_FLOW:
-		fs->h_u.tcp_ip4_spec.ip4src =
-				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
-		fs->m_u.tcp_ip4_spec.ip4src =
-			rule->unused_tuple & BIT(INNER_SRC_IP) ?
+	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
+	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
 
-		fs->h_u.tcp_ip4_spec.ip4dst =
-				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
-		fs->m_u.tcp_ip4_spec.ip4dst =
-			rule->unused_tuple & BIT(INNER_DST_IP) ?
+	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
+	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
 
-		fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
-		fs->m_u.tcp_ip4_spec.psrc =
-				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+	spec->psrc = cpu_to_be16(rule->tuples.src_port);
+	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
 			0 : cpu_to_be16(rule->tuples_mask.src_port);
 
-		fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
-		fs->m_u.tcp_ip4_spec.pdst =
-				rule->unused_tuple & BIT(INNER_DST_PORT) ?
+	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
+	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
 
-		fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
-		fs->m_u.tcp_ip4_spec.tos =
-				rule->unused_tuple & BIT(INNER_IP_TOS) ?
+	spec->tos = rule->tuples.ip_tos;
+	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
 			0 : rule->tuples_mask.ip_tos;
+}
 
-		break;
-	case IP_USER_FLOW:
-		fs->h_u.usr_ip4_spec.ip4src =
-				cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
-		fs->m_u.tcp_ip4_spec.ip4src =
-			rule->unused_tuple & BIT(INNER_SRC_IP) ?
+static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
+				  struct ethtool_usrip4_spec *spec,
+				  struct ethtool_usrip4_spec *spec_mask)
+{
+	spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
+	spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
 			0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
 
-		fs->h_u.usr_ip4_spec.ip4dst =
-				cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
-		fs->m_u.usr_ip4_spec.ip4dst =
-			rule->unused_tuple & BIT(INNER_DST_IP) ?
+	spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
+	spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
 			0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
 
-		fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
-		fs->m_u.usr_ip4_spec.tos =
-				rule->unused_tuple & BIT(INNER_IP_TOS) ?
+	spec->tos = rule->tuples.ip_tos;
+	spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
 			0 : rule->tuples_mask.ip_tos;
 
-		fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
-		fs->m_u.usr_ip4_spec.proto =
-				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+	spec->proto = rule->tuples.ip_proto;
+	spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
 			0 : rule->tuples_mask.ip_proto;
 
-		fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+	spec->ip_ver = ETH_RX_NFC_IP4;
+}
 
-		break;
-	case SCTP_V6_FLOW:
-	case TCP_V6_FLOW:
-	case UDP_V6_FLOW:
-		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
-				  rule->tuples.src_ip, IPV6_SIZE);
-		if (rule->unused_tuple & BIT(INNER_SRC_IP))
-			memset(fs->m_u.tcp_ip6_spec.ip6src, 0,
-			       sizeof(int) * IPV6_SIZE);
-		else
-			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
-					  rule->tuples_mask.src_ip, IPV6_SIZE);
+static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
+				     struct ethtool_tcpip6_spec *spec,
+				     struct ethtool_tcpip6_spec *spec_mask)
+{
+	cpu_to_be32_array(spec->ip6src,
+			  rule->tuples.src_ip, IPV6_SIZE);
+	cpu_to_be32_array(spec->ip6dst,
+			  rule->tuples.dst_ip, IPV6_SIZE);
+	if (rule->unused_tuple & BIT(INNER_SRC_IP))
+		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
+	else
+		cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
+				  IPV6_SIZE);
 
-		cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
-				  rule->tuples.dst_ip, IPV6_SIZE);
-		if (rule->unused_tuple & BIT(INNER_DST_IP))
-			memset(fs->m_u.tcp_ip6_spec.ip6dst, 0,
-			       sizeof(int) * IPV6_SIZE);
-		else
-			cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
-					  rule->tuples_mask.dst_ip, IPV6_SIZE);
+	if (rule->unused_tuple & BIT(INNER_DST_IP))
+		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
+	else
+		cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
+				  IPV6_SIZE);
 
-		fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
-		fs->m_u.tcp_ip6_spec.psrc =
-				rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+	spec->psrc = cpu_to_be16(rule->tuples.src_port);
+	spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
 			0 : cpu_to_be16(rule->tuples_mask.src_port);
 
-		fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
-		fs->m_u.tcp_ip6_spec.pdst =
-				rule->unused_tuple & BIT(INNER_DST_PORT) ?
+	spec->pdst = cpu_to_be16(rule->tuples.dst_port);
+	spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
 			0 : cpu_to_be16(rule->tuples_mask.dst_port);
+}
 
-		break;
-	case IPV6_USER_FLOW:
-		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
-				  rule->tuples.src_ip, IPV6_SIZE);
-		if (rule->unused_tuple & BIT(INNER_SRC_IP))
-			memset(fs->m_u.usr_ip6_spec.ip6src, 0,
-			       sizeof(int) * IPV6_SIZE);
-		else
-			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
-					  rule->tuples_mask.src_ip, IPV6_SIZE);
+static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
+				  struct ethtool_usrip6_spec *spec,
+				  struct ethtool_usrip6_spec *spec_mask)
+{
+	cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
+	cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
+	if (rule->unused_tuple & BIT(INNER_SRC_IP))
+		memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
+	else
+		cpu_to_be32_array(spec_mask->ip6src,
+				  rule->tuples_mask.src_ip, IPV6_SIZE);
 
-		cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
-				  rule->tuples.dst_ip, IPV6_SIZE);
-		if (rule->unused_tuple & BIT(INNER_DST_IP))
-			memset(fs->m_u.usr_ip6_spec.ip6dst, 0,
-			       sizeof(int) * IPV6_SIZE);
-		else
-			cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
-					  rule->tuples_mask.dst_ip, IPV6_SIZE);
+	if (rule->unused_tuple & BIT(INNER_DST_IP))
+		memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
+	else
+		cpu_to_be32_array(spec_mask->ip6dst,
+				  rule->tuples_mask.dst_ip, IPV6_SIZE);
 
-		fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
-		fs->m_u.usr_ip6_spec.l4_proto =
-				rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+	spec->l4_proto = rule->tuples.ip_proto;
+	spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
 			0 : rule->tuples_mask.ip_proto;
+}
+
+static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
+				    struct ethhdr *spec,
+				    struct ethhdr *spec_mask)
+{
+	ether_addr_copy(spec->h_source, rule->tuples.src_mac);
+	ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
 
-		break;
-	case ETHER_FLOW:
-		ether_addr_copy(fs->h_u.ether_spec.h_source,
-				rule->tuples.src_mac);
-		if (rule->unused_tuple & BIT(INNER_SRC_MAC))
-			eth_zero_addr(fs->m_u.ether_spec.h_source);
-		else
-			ether_addr_copy(fs->m_u.ether_spec.h_source,
-					rule->tuples_mask.src_mac);
+	if (rule->unused_tuple & BIT(INNER_SRC_MAC))
+		eth_zero_addr(spec_mask->h_source);
+	else
+		ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
 
-		ether_addr_copy(fs->h_u.ether_spec.h_dest,
-				rule->tuples.dst_mac);
-		if (rule->unused_tuple & BIT(INNER_DST_MAC))
-			eth_zero_addr(fs->m_u.ether_spec.h_dest);
-		else
-			ether_addr_copy(fs->m_u.ether_spec.h_dest,
-					rule->tuples_mask.dst_mac);
+	if (rule->unused_tuple & BIT(INNER_DST_MAC))
+		eth_zero_addr(spec_mask->h_dest);
+	else
+		ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
 
-		fs->h_u.ether_spec.h_proto =
-				cpu_to_be16(rule->tuples.ether_proto);
-		fs->m_u.ether_spec.h_proto =
-				rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
+	spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
+	spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
 			0 : cpu_to_be16(rule->tuples_mask.ether_proto);
+}
 
-		break;
-	default:
-		spin_unlock_bh(&hdev->fd_rule_lock);
-		return -EOPNOTSUPP;
-	}
-
+static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
+				  struct hclge_fd_rule *rule)
+{
 	if (fs->flow_type & FLOW_EXT) {
 		fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
 		fs->m_ext.vlan_tci =
@@ -6126,6 +6079,68 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
 			ether_addr_copy(fs->m_u.ether_spec.h_dest,
 					rule->tuples_mask.dst_mac);
 	}
+}
+
+static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
+				  struct ethtool_rxnfc *cmd)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_fd_rule *rule = NULL;
+	struct hclge_dev *hdev = vport->back;
+	struct ethtool_rx_flow_spec *fs;
+	struct hlist_node *node2;
+
+	if (!hnae3_dev_fd_supported(hdev))
+		return -EOPNOTSUPP;
+
+	fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+	spin_lock_bh(&hdev->fd_rule_lock);
+
+	hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+		if (rule->location >= fs->location)
+			break;
+	}
+
+	if (!rule || fs->location != rule->location) {
+		spin_unlock_bh(&hdev->fd_rule_lock);
+
+		return -ENOENT;
+	}
+
+	fs->flow_type = rule->flow_type;
+	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+	case SCTP_V4_FLOW:
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW:
+		hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
+					 &fs->m_u.tcp_ip4_spec);
+		break;
+	case IP_USER_FLOW:
+		hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
+				      &fs->m_u.usr_ip4_spec);
+		break;
+	case SCTP_V6_FLOW:
+	case TCP_V6_FLOW:
+	case UDP_V6_FLOW:
+		hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
+					 &fs->m_u.tcp_ip6_spec);
+		break;
+	case IPV6_USER_FLOW:
+		hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
+				      &fs->m_u.usr_ip6_spec);
+		break;
+	/* The flow type of fd rule has been checked before adding in to rule
+	 * list. As other flow types have been handled, it must be ETHER_FLOW
+	 * for the default case
+	 */
+	default:
+		hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
+					&fs->m_u.ether_spec);
+		break;
+	}
+
+	hclge_fd_get_ext_info(fs, rule);
+
 	if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
 		fs->ring_cookie = RX_CLS_FLOW_DISC;
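
For context, hclge_get_fd_rule_info() is the driver's backend for the
ETHTOOL_GRXCLSRULE request, which "ethtool -u <dev> rule <N>" issues from
user space. A minimal user-space sketch of that call path follows; the
interface name "eth0" and rule location 0 are placeholders:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_GRXCLSRULE;
	nfc.fs.location = 0;			/* placeholder rule index */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder netdev */
	ifr.ifr_data = (char *)&nfc;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GRXCLSRULE");	/* ENOENT if no rule there */
		return 1;
	}

	/* fs was filled by the driver's get_rxnfc handler, e.g. the
	 * switch in hclge_get_fd_rule_info() above.
	 */
	printf("flow_type 0x%x ring_cookie %llu\n",
	       nfc.fs.flow_type, (unsigned long long)nfc.fs.ring_cookie);
	close(fd);
	return 0;
}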