Commit 0c4a9541 authored by David S. Miller
Browse files

Merge branch 'sysctl-data-races'

Kuniyuki Iwashima says:

====================
net: sysctl: Fix data-races around net.core.XXX

This series fixes data-races around all knobs in net_core_table and
netns_core_table except for bpf stuff.

These knobs are skipped:

  - 4 bpf knobs
  - netdev_rss_key: Written only once by net_get_random_once() and
                    read-only knob
  - rps_sock_flow_entries: Protected with sock_flow_mutex
  - flow_limit_cpu_bitmap: Protected with flow_limit_update_mutex
  - flow_limit_table_len: Protected with flow_limit_update_mutex
  - default_qdisc: Protected with qdisc_mod_lock
  - warnings: Unused
  - high_order_alloc_disable: Protected with static_key_mutex
  - skb_defer_max: Already using READ_ONCE()
  - sysctl_txrehash: Already using READ_ONCE()

Note 5th patch fixes net.core.message_cost and net.core.message_burst,
and lib/ratelimit.c does not have an explicit maintainer.

Changes:
  v3:
    * Fix build failures of CONFIG_SYSCTL=n case in 13th & 14th patches

  v2: https://lore.kernel.org/netdev/20220818035227.81567-1-kuniyu@amazon.com/
    * Removed 4 bpf knobs and added 6 knobs

  v1: https://lore.kernel.org/netdev/20220816052347.70042-1-kuniyu@amazon.com/


====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents c624c58e 3c9ba81d
Loading
Loading
Loading
Loading
+17 −3
Original line number Diff line number Diff line
@@ -640,9 +640,23 @@ extern int sysctl_devconf_inherit_init_net;
 */
/* Decide whether this netns may create fallback tunnel devices.
 * Returns true when the fb_tunnels_only_for_init_net sysctl allows it
 * (0 = all netns, 1 = init_net only), or unconditionally when sysctl
 * support is compiled out.
 */
static inline bool net_has_fallback_tunnels(const struct net *net)
{
#if IS_ENABLED(CONFIG_SYSCTL)
	/* Read the knob once so both tests below see the same value;
	 * READ_ONCE() avoids a data race with the lockless sysctl writer.
	 */
	int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);

	return !fb_tunnels_only_for_init_net ||
		(net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
#else
	return true;
#endif
}

static inline int net_inherit_devconf(void)
{
#if IS_ENABLED(CONFIG_SYSCTL)
	return READ_ONCE(sysctl_devconf_inherit_init_net);
#else
	return 0;
#endif
}

static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
+1 −1
Original line number Diff line number Diff line
@@ -33,7 +33,7 @@ extern unsigned int sysctl_net_busy_poll __read_mostly;

/* True when busy-poll is globally enabled (net.core.busy_poll != 0).
 * READ_ONCE() avoids a data race with the lockless sysctl writer.
 */
static inline bool net_busy_loop_on(void)
{
	return READ_ONCE(sysctl_net_busy_poll);
}

static inline bool sk_can_busy_loop(const struct sock *sk)
+1 −1
Original line number Diff line number Diff line
@@ -439,7 +439,7 @@ static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb,
{
	/* Queue the skb on the per-NAPI batch list and flush the batch
	 * once it reaches the gro_normal_batch sysctl threshold.
	 * READ_ONCE() avoids a data race with the lockless sysctl writer.
	 */
	list_add_tail(&skb->list, &napi->rx_list);
	napi->rx_count += segs;
	if (napi->rx_count >= READ_ONCE(gro_normal_batch))
		gro_normal_list(napi);
}

+9 −3
Original line number Diff line number Diff line
@@ -26,10 +26,16 @@
 */
int ___ratelimit(struct ratelimit_state *rs, const char *func)
{
	/* Paired with WRITE_ONCE() in .proc_handler().
	 * Changing two values separately could be inconsistent
	 * and some message could be lost.  (See: net_ratelimit_state).
	 */
	int interval = READ_ONCE(rs->interval);
	int burst = READ_ONCE(rs->burst);
	unsigned long flags;
	int ret;

	if (!rs->interval)
	if (!interval)
		return 1;

	/*
@@ -44,7 +50,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
	if (!rs->begin)
		rs->begin = jiffies;

	if (time_is_before_jiffies(rs->begin + rs->interval)) {
	if (time_is_before_jiffies(rs->begin + interval)) {
		if (rs->missed) {
			if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
				printk_deferred(KERN_WARNING
@@ -56,7 +62,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
		rs->begin   = jiffies;
		rs->printed = 0;
	}
	if (rs->burst && rs->burst > rs->printed) {
	if (burst && burst > rs->printed) {
		rs->printed++;
		ret = 1;
	} else {
+3 −2
Original line number Diff line number Diff line
@@ -310,11 +310,12 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
				 void *owner, u32 size)
{
	int optmem_max = READ_ONCE(sysctl_optmem_max);
	struct sock *sk = (struct sock *)owner;

	/* same check as in sock_kmalloc() */
	if (size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
	if (size <= optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
		atomic_add(size, &sk->sk_omem_alloc);
		return 0;
	}
Loading