Commit ceb20a3c authored by Paolo Abeni's avatar Paolo Abeni
Browse files
Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for net:

1) Fix missing use-refcount overflow checks in nf_tables.

2) Do not set IPS_ASSURED for IPS_NAT_CLASH entries in GRE tracker,
   from Florian Westphal.

3) Bail out if nf_ct_helper_hash is NULL before registering helper,
   from Florent Revest.

4) Use siphash() instead of siphash_4u64() to fix a performance regression,
   also from Florian.

5) Do not allow to add rules to removed chains via ID,
   from Thadeu Lima de Souza Cascardo.

6) Fix oob read access in byteorder expression, also from Thadeu.

netfilter pull request 23-07-06

====================

Link: https://lore.kernel.org/r/20230705230406.52201-1-pablo@netfilter.org


Signed-off-by: default avatarPaolo Abeni <pabeni@redhat.com>
parents 21327f81 caf3ef74
Loading
Loading
Loading
Loading
+3 −0
Original line number Diff line number Diff line
@@ -67,6 +67,9 @@ struct nf_conntrack_tuple {
		/* The protocol. */
		u_int8_t protonum;

		/* The direction must be ignored for the tuplehash */
		struct { } __nfct_hash_offsetend;

		/* The direction (for tuplehash) */
		u_int8_t dir;
	} dst;
+27 −4
Original line number Diff line number Diff line
@@ -1211,6 +1211,29 @@ int __nft_release_basechain(struct nft_ctx *ctx);

unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);

/* Bump an nf_tables use counter, refusing to wrap: returns false and
 * leaves the counter untouched once it has reached UINT_MAX, true on a
 * successful increment.
 */
static inline bool nft_use_inc(u32 *use)
{
	u32 cur = *use;

	if (cur == UINT_MAX)
		return false;

	*use = cur + 1;
	return true;
}

/* Drop an nf_tables use counter; a one-shot warning fires if the counter
 * was already zero (i.e. the decrement underflows).
 */
static inline void nft_use_dec(u32 *use)
{
	u32 old = (*use)--;

	WARN_ON_ONCE(old == 0);
}

/* Error/abort-path helper: re-take a use reference that was previously
 * held. Restoring must always succeed, so a failed increment here points
 * at a refcounting bug and triggers a one-shot warning.
 */
static inline void nft_use_inc_restore(u32 *use)
{
	bool ok = nft_use_inc(use);

	WARN_ON_ONCE(!ok);
}

#define nft_use_dec_restore	nft_use_dec

/**
 *	struct nft_table - nf_tables table
 *
@@ -1296,8 +1319,8 @@ struct nft_object {
	struct list_head		list;
	struct rhlist_head		rhlhead;
	struct nft_object_hash_key	key;
	u32				genmask:2,
					use:30;
	u32				genmask:2;
	u32				use;
	u64				handle;
	u16				udlen;
	u8				*udata;
@@ -1399,8 +1422,8 @@ struct nft_flowtable {
	char				*name;
	int				hooknum;
	int				ops_len;
	u32				genmask:2,
					use:30;
	u32				genmask:2;
	u32				use;
	u64				handle;
	/* runtime data below here */
	struct list_head		hook_list ____cacheline_aligned;
+7 −13
Original line number Diff line number Diff line
@@ -211,24 +211,18 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
			      unsigned int zoneid,
			      const struct net *net)
{
	u64 a, b, c, d;
	siphash_key_t key;

	get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));

	/* The direction must be ignored, handle usable tuplehash members manually */
	a = (u64)tuple->src.u3.all[0] << 32 | tuple->src.u3.all[3];
	b = (u64)tuple->dst.u3.all[0] << 32 | tuple->dst.u3.all[3];
	key = nf_conntrack_hash_rnd;

	c = (__force u64)tuple->src.u.all << 32 | (__force u64)tuple->dst.u.all << 16;
	c |= tuple->dst.protonum;
	key.key[0] ^= zoneid;
	key.key[1] ^= net_hash_mix(net);

	d = (u64)zoneid << 32 | net_hash_mix(net);

	/* IPv4: u3.all[1,2,3] == 0 */
	c ^= (u64)tuple->src.u3.all[1] << 32 | tuple->src.u3.all[2];
	d += (u64)tuple->dst.u3.all[1] << 32 | tuple->dst.u3.all[2];

	return (u32)siphash_4u64(a, b, c, d, &nf_conntrack_hash_rnd);
	return siphash((void *)tuple,
			offsetofend(struct nf_conntrack_tuple, dst.__nfct_hash_offsetend),
			&key);
}

static u32 scale_hash(u32 hash)
+4 −0
Original line number Diff line number Diff line
@@ -360,6 +360,9 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
	BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
	BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);

	if (!nf_ct_helper_hash)
		return -ENOENT;

	if (me->expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT)
		return -EINVAL;

@@ -515,4 +518,5 @@ int nf_conntrack_helper_init(void)
void nf_conntrack_helper_fini(void)
{
	/* Free the helper hash table and clear the global pointer so that a
	 * later nf_conntrack_helper_register() call sees NULL and fails with
	 * -ENOENT instead of touching freed memory.
	 */
	kvfree(nf_ct_helper_hash);
	nf_ct_helper_hash = NULL;
}
+9 −1
Original line number Diff line number Diff line
@@ -205,6 +205,8 @@ int nf_conntrack_gre_packet(struct nf_conn *ct,
			    enum ip_conntrack_info ctinfo,
			    const struct nf_hook_state *state)
{
	unsigned long status;

	if (!nf_ct_is_confirmed(ct)) {
		unsigned int *timeouts = nf_ct_timeout_lookup(ct);

@@ -217,11 +219,17 @@ int nf_conntrack_gre_packet(struct nf_conn *ct,
		ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED];
	}

	status = READ_ONCE(ct->status);
	/* If we've seen traffic both ways, this is a GRE connection.
	 * Extend timeout. */
	if (ct->status & IPS_SEEN_REPLY) {
	if (status & IPS_SEEN_REPLY) {
		nf_ct_refresh_acct(ct, ctinfo, skb,
				   ct->proto.gre.stream_timeout);

		/* never set ASSURED for IPS_NAT_CLASH, they time out soon */
		if (unlikely((status & IPS_NAT_CLASH)))
			return NF_ACCEPT;

		/* Also, more likely to be important, and not a probe. */
		if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
			nf_conntrack_event_cache(IPCT_ASSURED, ct);
Loading