Commit c9f21106 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'net-ipv4-sysctl-races-part-3'



Kuniyuki Iwashima says:

====================
sysctl: Fix data-races around ipv4_net_table (Round 3).

This series fixes data-races around 21 knobs after
igmp_link_local_mcast_reports in ipv4_net_table.

These 4 knobs are skipped because they are safe.

  - tcp_congestion_control: Safe with RCU and xchg().
  - tcp_available_congestion_control: Read only.
  - tcp_allowed_congestion_control: Safe with RCU and spinlock().
  - tcp_fastopen_key: Safe with RCU and xchg().

So, round 4 will start with fib_multipath_use_neigh.
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 1e20904e 021266ec
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -563,7 +563,7 @@ static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
	ihv3->nsrcs	= 0;
	ihv3->resv	= 0;
	ihv3->suppress	= false;
	ihv3->qrv	= amt->net->ipv4.sysctl_igmp_qrv;
	ihv3->qrv	= READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
	ihv3->csum	= 0;
	csum		= &ihv3->csum;
	csum_start	= (void *)ihv3;
@@ -3095,7 +3095,7 @@ static int amt_newlink(struct net *net, struct net_device *dev,
		goto err;
	}
	if (amt->mode == AMT_MODE_RELAY) {
		amt->qrv = amt->net->ipv4.sysctl_igmp_qrv;
		amt->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
		amt->qri = 10;
		dev->needed_headroom = amt->stream_dev->needed_headroom +
				       AMT_RELAY_HLEN;
+9 −5
Original line number Diff line number Diff line
@@ -1493,21 +1493,24 @@ static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
	return tp->keepalive_intvl ? :
		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	/* Per-socket TCP_KEEPIDLE overrides the per-netns sysctl; the
	 * sysctl is read with READ_ONCE() because it can be rewritten
	 * concurrently (lockless) by a sysctl writer.
	 */
	return tp->keepalive_time ? :
		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	/* Per-socket TCP_KEEPCNT overrides the per-netns sysctl; the
	 * sysctl is read with READ_ONCE() because it can be rewritten
	 * concurrently (lockless) by a sysctl writer.
	 */
	return tp->keepalive_probes ? :
		READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
@@ -1520,7 +1523,8 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
	int fin_timeout = tcp_sk(sk)->linger2 ? :
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
@@ -2023,7 +2027,7 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	/* Per-socket TCP_NOTSENT_LOWAT overrides the per-netns sysctl;
	 * READ_ONCE() pairs with lockless writers of the sysctl.
	 */
	return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
}

bool tcp_stream_memory_free(const struct sock *sk, int wake);
+2 −2
Original line number Diff line number Diff line
@@ -7041,7 +7041,7 @@ BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len
	if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
		return -EINVAL;

	if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies))
		return -EINVAL;

	if (!th->ack || th->rst || th->syn)
@@ -7116,7 +7116,7 @@ BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
	if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
		return -EINVAL;

	if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies))
		return -ENOENT;

	if (!th->syn || th->ack || th->fin || th->rst)
+2 −2
Original line number Diff line number Diff line
@@ -387,7 +387,7 @@ void reuseport_stop_listen_sock(struct sock *sk)
		prog = rcu_dereference_protected(reuse->prog,
						 lockdep_is_held(&reuseport_lock));

		if (sock_net(sk)->ipv4.sysctl_tcp_migrate_req ||
		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req) ||
		    (prog && prog->expected_attach_type == BPF_SK_REUSEPORT_SELECT_OR_MIGRATE)) {
			/* Migration capable, move sk from the listening section
			 * to the closed section.
@@ -545,7 +545,7 @@ struct sock *reuseport_migrate_sock(struct sock *sk,
	hash = migrating_sk->sk_hash;
	prog = rcu_dereference(reuse->prog);
	if (!prog || prog->expected_attach_type != BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) {
		if (sock_net(sk)->ipv4.sysctl_tcp_migrate_req)
		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req))
			goto select_by_hash;
		goto failure;
	}
+1 −1
Original line number Diff line number Diff line
@@ -217,7 +217,7 @@ int inet_listen(struct socket *sock, int backlog)
		 * because the socket was in TCP_LISTEN state previously but
		 * was shutdown() rather than close().
		 */
		tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
		tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
		if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
		    (tcp_fastopen & TFO_SERVER_ENABLE) &&
		    !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
Loading