Commit 0884aaf3 authored by Jakub Kicinski
Browse files

Merge branch 'bridge-add-mac-authentication-bypass-mab-support'

Ido Schimmel says:

====================
bridge: Add MAC Authentication Bypass (MAB) support

Patch #1 adds MAB support in the bridge driver. See the commit message
for motivation, design choices and implementation details.

Patch #2 adds corresponding test cases.

Follow-up patchsets will add offload support in mlxsw and mv88e6xxx.
====================

Link: https://lore.kernel.org/r/20221101193922.2125323-1-idosch@nvidia.com


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents fbeb229a 4a331d34
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -59,6 +59,7 @@ struct br_ip_list {
#define BR_MRP_LOST_IN_CONT	BIT(19)
#define BR_TX_FWD_OFFLOAD	BIT(20)
#define BR_PORT_LOCKED		BIT(21)
#define BR_PORT_MAB		BIT(22)

#define BR_DEFAULT_AGEING_TIME	(300 * HZ)

+1 −0
Original line number Diff line number Diff line
@@ -561,6 +561,7 @@ enum {
	IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
	IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
	IFLA_BRPORT_LOCKED,
	IFLA_BRPORT_MAB,
	__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
+7 −1
Original line number Diff line number Diff line
@@ -53,6 +53,7 @@ enum {
#define NTF_ROUTER	(1 << 7)
/* Extended flags under NDA_FLAGS_EXT: */
#define NTF_EXT_MANAGED		(1 << 0)
#define NTF_EXT_LOCKED		(1 << 1)

/*
 *	Neighbor Cache Entry States.
@@ -86,6 +87,11 @@ enum {
 * NTF_EXT_MANAGED flagged neighbor entries are managed by the kernel on behalf
 * of a user space control plane, and automatically refreshed so that (if
 * possible) they remain in NUD_REACHABLE state.
 *
 * NTF_EXT_LOCKED flagged bridge FDB entries are entries generated by the
 * bridge in response to a host trying to communicate via a locked bridge port
 * with MAB enabled. Their purpose is to notify user space that a host requires
 * authentication.
 */

struct nda_cacheinfo {
+24 −0
Original line number Diff line number Diff line
@@ -105,6 +105,7 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	u32 ext_flags = 0;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
@@ -125,11 +126,16 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
		ndm->ndm_flags |= NTF_EXT_LEARNED;
	if (test_bit(BR_FDB_STICKY, &fdb->flags))
		ndm->ndm_flags |= NTF_STICKY;
	if (test_bit(BR_FDB_LOCKED, &fdb->flags))
		ext_flags |= NTF_EXT_LOCKED;

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
		goto nla_put_failure;
	if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
		goto nla_put_failure;
	if (nla_put_u32(skb, NDA_FLAGS_EXT, ext_flags))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
@@ -171,6 +177,7 @@ static inline size_t fdb_nlmsg_size(void)
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(u32)) /* NDA_MASTER */
		+ nla_total_size(sizeof(u32)) /* NDA_FLAGS_EXT */
		+ nla_total_size(sizeof(u16)) /* NDA_VLAN */
		+ nla_total_size(sizeof(struct nda_cacheinfo))
		+ nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
@@ -879,6 +886,11 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
						      &fdb->flags)))
					clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
						  &fdb->flags);
				/* Clear locked flag when roaming to an
				 * unlocked port.
				 */
				if (unlikely(test_bit(BR_FDB_LOCKED, &fdb->flags)))
					clear_bit(BR_FDB_LOCKED, &fdb->flags);
			}

			if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
@@ -1082,6 +1094,9 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
		modified = true;
	}

	if (test_and_clear_bit(BR_FDB_LOCKED, &fdb->flags))
		modified = true;

	if (fdb_handle_notify(fdb, notify))
		modified = true;

@@ -1150,6 +1165,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan *v;
	struct net_bridge *br = NULL;
	u32 ext_flags = 0;
	int err = 0;

	trace_br_fdb_add(ndm, dev, addr, vid, nlh_flags);
@@ -1178,6 +1194,14 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
		vg = nbp_vlan_group(p);
	}

	if (tb[NDA_FLAGS_EXT])
		ext_flags = nla_get_u32(tb[NDA_FLAGS_EXT]);

	if (ext_flags & NTF_EXT_LOCKED) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add FDB entry with \"locked\" flag set");
		return -EINVAL;
	}

	if (tb[NDA_FDB_EXT_ATTRS]) {
		attr = tb[NDA_FDB_EXT_ATTRS];
		err = nla_parse_nested(nfea_tb, NFEA_MAX, attr,
+19 −2
Original line number Diff line number Diff line
@@ -109,9 +109,26 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
		struct net_bridge_fdb_entry *fdb_src =
			br_fdb_find_rcu(br, eth_hdr(skb)->h_source, vid);

		if (!fdb_src || READ_ONCE(fdb_src->dst) != p ||
		    test_bit(BR_FDB_LOCAL, &fdb_src->flags))
		if (!fdb_src) {
			/* FDB miss. Create locked FDB entry if MAB is enabled
			 * and drop the packet.
			 */
			if (p->flags & BR_PORT_MAB)
				br_fdb_update(br, p, eth_hdr(skb)->h_source,
					      vid, BIT(BR_FDB_LOCKED));
			goto drop;
		} else if (READ_ONCE(fdb_src->dst) != p ||
			   test_bit(BR_FDB_LOCAL, &fdb_src->flags)) {
			/* FDB mismatch. Drop the packet without roaming. */
			goto drop;
		} else if (test_bit(BR_FDB_LOCKED, &fdb_src->flags)) {
			/* FDB match, but entry is locked. Refresh it and drop
			 * the packet.
			 */
			br_fdb_update(br, p, eth_hdr(skb)->h_source, vid,
				      BIT(BR_FDB_LOCKED));
			goto drop;
		}
	}

	nbp_switchdev_frame_mark(p, skb);
Loading