Commit 9287aa2b authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'mlxsw-mphash-policies'

Ido Schimmel says:

====================
mlxsw: Add support for new multipath hash policies

This patchset adds support for two new multipath hash policies in mlxsw.

Patch #1 emits net events whenever the
net.ipv{4,6}.fib_multipath_hash_fields sysctls are changed. This allows
listeners to react to changes in the packet fields used for the
computation of the multipath hash.

Patches #2-#3 refactor the code in mlxsw that is responsible for the
configuration of the multipath hash, so that it will be easier to extend
for the two new policies.

Patch #4 adds the register fields required to support the new policies.

Patches #5-#7 add support for inner layer 3 and custom multipath hash
policies.

Tested using the following forwarding selftests:

* custom_multipath_hash.sh
* gre_custom_multipath_hash.sh
* gre_inner_v4_multipath.sh
* gre_inner_v6_multipath.sh
====================
parents 340f42f7 daeabf89
Loading
Loading
Loading
Loading
+44 −40
Original line number Diff line number Diff line
@@ -8305,6 +8305,8 @@ enum {
	MLXSW_REG_RECR2_TCP_UDP_EN_IPV4		= 7,
	/* Enable TCP/UDP header fields if packet is IPv6 */
	MLXSW_REG_RECR2_TCP_UDP_EN_IPV6		= 8,

	__MLXSW_REG_RECR2_HEADER_CNT,
};

/* reg_recr2_outer_header_enables
@@ -8339,6 +8341,8 @@ enum {
	MLXSW_REG_RECR2_TCP_UDP_SPORT			= 74,
	/* TCP/UDP Destination Port */
	MLXSW_REG_RECR2_TCP_UDP_DPORT			= 75,

	__MLXSW_REG_RECR2_FIELD_CNT,
};

/* reg_recr2_outer_header_fields_enable
@@ -8347,47 +8351,47 @@ enum {
 */
MLXSW_ITEM_BIT_ARRAY(reg, recr2, outer_header_fields_enable, 0x14, 0x14, 1);

/* Enable every byte of the outer IPv4 source address in the ECMP hash. */
static inline void mlxsw_reg_recr2_ipv4_sip_enable(char *payload)
{
	int field;

	for (field = MLXSW_REG_RECR2_IPV4_SIP0;
	     field <= MLXSW_REG_RECR2_IPV4_SIP3; field++)
		mlxsw_reg_recr2_outer_header_fields_enable_set(payload, field,
							       true);
}

/* Enable every byte of the outer IPv4 destination address in the ECMP hash. */
static inline void mlxsw_reg_recr2_ipv4_dip_enable(char *payload)
{
	int field;

	for (field = MLXSW_REG_RECR2_IPV4_DIP0;
	     field <= MLXSW_REG_RECR2_IPV4_DIP3; field++)
		mlxsw_reg_recr2_outer_header_fields_enable_set(payload, field,
							       true);
}

/* Enable all outer IPv6 source address fields in the ECMP hash: the
 * combined SIP0-7 field first, then the per-byte SIP8..SIP15 fields.
 */
static inline void mlxsw_reg_recr2_ipv6_sip_enable(char *payload)
{
	int field;

	mlxsw_reg_recr2_outer_header_fields_enable_set(payload,
						       MLXSW_REG_RECR2_IPV6_SIP0_7,
						       true);

	for (field = MLXSW_REG_RECR2_IPV6_SIP8;
	     field <= MLXSW_REG_RECR2_IPV6_SIP15; field++)
		mlxsw_reg_recr2_outer_header_fields_enable_set(payload, field,
							       true);
}

static inline void mlxsw_reg_recr2_ipv6_dip_enable(char *payload)
{
	int i = MLXSW_REG_RECR2_IPV6_DIP0_7;

	mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, true);
/* reg_recr2_inner_header_enables
 * Bit mask where each bit enables a specific inner layer to be included in the
 * hash calculation. Same values as reg_recr2_outer_header_enables.
 * Access: RW
 */
MLXSW_ITEM_BIT_ARRAY(reg, recr2, inner_header_enables, 0x2C, 0x04, 1);

	i = MLXSW_REG_RECR2_IPV6_DIP8;
	for (; i <= MLXSW_REG_RECR2_IPV6_DIP15; i++)
		mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i,
							       true);
}
/* Inner packet fields that can be enabled for the ECMP hash computation.
 * Each value is the bit index of the corresponding field in
 * reg_recr2_inner_header_fields_enable.
 */
enum {
	/* Inner IPv4 Source IP */
	MLXSW_REG_RECR2_INNER_IPV4_SIP0			= 3,
	MLXSW_REG_RECR2_INNER_IPV4_SIP3			= 6,
	/* Inner IPv4 Destination IP */
	MLXSW_REG_RECR2_INNER_IPV4_DIP0			= 7,
	MLXSW_REG_RECR2_INNER_IPV4_DIP3			= 10,
	/* Inner IP Protocol */
	MLXSW_REG_RECR2_INNER_IPV4_PROTOCOL		= 11,
	/* Inner IPv6 Source IP */
	MLXSW_REG_RECR2_INNER_IPV6_SIP0_7		= 12,
	MLXSW_REG_RECR2_INNER_IPV6_SIP8			= 20,
	MLXSW_REG_RECR2_INNER_IPV6_SIP15		= 27,
	/* Inner IPv6 Destination IP */
	MLXSW_REG_RECR2_INNER_IPV6_DIP0_7		= 28,
	MLXSW_REG_RECR2_INNER_IPV6_DIP8			= 36,
	MLXSW_REG_RECR2_INNER_IPV6_DIP15		= 43,
	/* Inner IPv6 Next Header */
	MLXSW_REG_RECR2_INNER_IPV6_NEXT_HEADER		= 44,
	/* Inner IPv6 Flow Label */
	MLXSW_REG_RECR2_INNER_IPV6_FLOW_LABEL		= 45,
	/* Inner TCP/UDP Source Port */
	MLXSW_REG_RECR2_INNER_TCP_UDP_SPORT		= 46,
	/* Inner TCP/UDP Destination Port */
	MLXSW_REG_RECR2_INNER_TCP_UDP_DPORT		= 47,

	/* Number of inner field bits; sizes the config bitmaps below. */
	__MLXSW_REG_RECR2_INNER_FIELD_CNT,
};

/* reg_recr2_inner_header_fields_enable
 * Inner packet fields to enable for ECMP hash subject to inner_header_enables.
 * Access: RW
 */
MLXSW_ITEM_BIT_ARRAY(reg, recr2, inner_header_fields_enable, 0x30, 0x08, 1);

static inline void mlxsw_reg_recr2_pack(char *payload, u32 seed)
{
+201 −38
Original line number Diff line number Diff line
@@ -9599,66 +9599,229 @@ static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* Multipath hash configuration, accumulated as bitmaps of RECR2 header and
 * field enable bits, then written to the register in one pass by
 * mlxsw_sp_mp_hash_init().
 */
struct mlxsw_sp_mp_hash_config {
	DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
	DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
	DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
	DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
};

/* Mark a single header enable bit in a config bitmap. */
#define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
	bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)

/* Mark a single field enable bit in a config bitmap. */
#define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)

/* Mark a contiguous range of _nr field enable bits starting at _field. */
#define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)

/* Enable hashing on the inner layer 3 headers (IPv4 addresses, IPv6
 * addresses, next header and flow label) for encapsulated packets.
 */
static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
{
	unsigned long *inner_headers = config->inner_headers;
	unsigned long *inner_fields = config->inner_fields;

	/* IPv4 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
	/* IPv6 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
}

static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
{
	mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;

	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
}

/* Enable only the inner packet fields selected by the custom hash_fields
 * mask (FIB_MULTIPATH_HASH_FIELD_INNER_* bits from the
 * fib_multipath_hash_fields sysctl).
 */
static void
mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
			      u32 hash_fields)
{
	unsigned long *inner_headers = config->inner_headers;
	unsigned long *inner_fields = config->inner_fields;

	/* IPv4 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
	/* IPv6 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
	}
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
	}
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
	/* L4 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
}

static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_mp_hash_config *config)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	bool only_l3 = !net->ipv4.sysctl_fib_multipath_hash_policy;

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
	if (only_l3)
		return;
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;
	u32 hash_fields;

	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
	case 0:
		mlxsw_sp_mp4_hash_outer_addr(config);
		break;
	case 1:
		mlxsw_sp_mp4_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		break;
	case 2:
		/* Outer */
		mlxsw_sp_mp4_hash_outer_addr(config);
		/* Inner */
		mlxsw_sp_mp_hash_inner_l3(config);
		break;
	case 3:
		hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
		/* Outer */
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		/* Inner */
		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
		break;
	}
}

static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
{
	bool only_l3 = !ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp));
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;

	mlxsw_sp_mp_hash_header_set(recr2_pl,
				    MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
	mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
	mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
	mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
	mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
	if (only_l3) {
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
	} else {
		mlxsw_sp_mp_hash_header_set(recr2_pl,
					    MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_TCP_UDP_SPORT);
		mlxsw_sp_mp_hash_field_set(recr2_pl,
					   MLXSW_REG_RECR2_TCP_UDP_DPORT);
	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
}

/* Build the IPv6 multipath hash configuration according to the
 * net.ipv6.fib_multipath_hash_policy sysctl (read via
 * ip6_multipath_hash_policy()).
 */
static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_mp_hash_config *config)
{
	/* Only used by the custom policy (case 3) below. */
	u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;

	switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
	case 0: /* L3: addresses, next header and flow label */
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		break;
	case 1: /* L4: addresses, next header and TCP/UDP ports */
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		break;
	case 2: /* L3 or inner L3 */
		/* Outer */
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		/* Inner */
		mlxsw_sp_mp_hash_inner_l3(config);
		break;
	case 3: /* Custom fields from the hash_fields sysctl */
		/* Outer */
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
		}
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
		}
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		/* Inner */
		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
		break;
	}
}

/* Compute the IPv4/IPv6 multipath hash configuration and program it into
 * the RECR2 register. Returns 0 on success or a negative errno from the
 * register write.
 */
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_mp_hash_config config = {};
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	unsigned long bit;
	u32 seed;

	/* Derive a stable per-device seed from the base MAC address. */
	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
	mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);

	/* Translate the accumulated bitmaps into register enable bits. */
	for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
		mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
		mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
		mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
		mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
}
+17 −1
Original line number Diff line number Diff line
@@ -465,6 +465,22 @@ static int proc_fib_multipath_hash_policy(struct ctl_table *table, int write,

	return ret;
}

/* Sysctl handler for net.ipv4.fib_multipath_hash_fields: perform the
 * bounded uint write and, on a successful write, notify netevent
 * listeners that the multipath hash fields changed.
 */
static int proc_fib_multipath_hash_fields(struct ctl_table *table, int write,
					  void *buffer, size_t *lenp,
					  loff_t *ppos)
{
	struct net *net = container_of(table->data, struct net,
				       ipv4.sysctl_fib_multipath_hash_fields);
	int ret;

	ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write)
		call_netevent_notifiers(NETEVENT_IPV4_MPATH_HASH_UPDATE, net);

	return ret;
}
#endif

static struct ctl_table ipv4_table[] = {
@@ -1061,7 +1077,7 @@ static struct ctl_table ipv4_net_table[] = {
		.data		= &init_net.ipv4.sysctl_fib_multipath_hash_fields,
		.maxlen		= sizeof(u32),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.proc_handler	= proc_fib_multipath_hash_fields,
		.extra1		= SYSCTL_ONE,
		.extra2		= &fib_multipath_hash_fields_all_mask,
	},
+17 −1
Original line number Diff line number Diff line
@@ -44,6 +44,22 @@ static int proc_rt6_multipath_hash_policy(struct ctl_table *table, int write,
	return ret;
}

/* Sysctl handler for net.ipv6.fib_multipath_hash_fields: perform the
 * bounded uint write and, on a successful write, notify netevent
 * listeners that the multipath hash fields changed.
 */
static int
proc_rt6_multipath_hash_fields(struct ctl_table *table, int write, void *buffer,
			       size_t *lenp, loff_t *ppos)
{
	struct net *net = container_of(table->data, struct net,
				       ipv6.sysctl.multipath_hash_fields);
	int ret;

	ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write)
		call_netevent_notifiers(NETEVENT_IPV6_MPATH_HASH_UPDATE, net);

	return ret;
}

static struct ctl_table ipv6_table_template[] = {
	{
		.procname	= "bindv6only",
@@ -160,7 +176,7 @@ static struct ctl_table ipv6_table_template[] = {
		.data		= &init_net.ipv6.sysctl.multipath_hash_fields,
		.maxlen		= sizeof(u32),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.proc_handler	= proc_rt6_multipath_hash_fields,
		.extra1		= SYSCTL_ONE,
		.extra2		= &rt6_multipath_hash_fields_all_mask,
	},