Commit d288a162 authored by Wangyang Guo, committed by Jakub Kicinski

net: dst: Prevent false sharing vs. dst_entry::__refcnt



dst_entry::__refcnt is highly contended in scenarios where many connections
happen from and to the same IP. The reference count is an atomic_t, so the
reference count operations have to take the cache line exclusive.

Aside from the unavoidable reference count contention, there is another
significant problem caused by it: false sharing.
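
  As an illustration of the mechanism (not part of this change, and not
  kernel code): a minimal userspace sketch, with made-up struct and member
  names, where one thread updates an atomic counter while a second thread
  only reads a pointer that shares the counter's 64-byte cache line. Padding
  the pointer onto its own cache line makes the reader measurably faster.

  /* build: cc -O2 -pthread false_sharing_demo.c */
  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdio.h>

  static struct demo {
  	atomic_int refcnt;		/* written by the writer thread */
  	void *lwtstate_like;		/* read-mostly, same cache line */
  } d = { .lwtstate_like = &d };

  static void *writer(void *arg)
  {
  	(void)arg;
  	for (long i = 0; i < 100000000; i++)
  		atomic_fetch_add(&d.refcnt, 1);	/* takes the line exclusive */
  	return NULL;
  }

  static void *reader(void *arg)
  {
  	volatile void *sink = NULL;

  	(void)arg;
  	for (long i = 0; i < 100000000; i++)
  		sink = d.lwtstate_like;	/* stalls while the line bounces */
  	(void)sink;
  	return NULL;
  }

  int main(void)
  {
  	pthread_t a, b;

  	pthread_create(&a, NULL, writer, NULL);
  	pthread_create(&b, NULL, reader, NULL);
  	pthread_join(a, NULL);
  	pthread_join(b, NULL);
  	printf("refcnt=%d\n", atomic_load(&d.refcnt));
  	return 0;
  }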

perf top identified two affected read accesses: dst_entry::lwtstate and
rtable::rt_genid.

dst_entry::__refcnt is located at offset 64 of dst_entry, which puts it into
a separate cache line vs. the read-mostly members located at the beginning
of the struct.

That prevents false sharing vs. the struct members in the first 64
bytes of the structure, but there is also

  dst_entry::lwtstate

which is located after the reference count and in the same cache line. This
member is read after a reference count has been acquired.
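
  The cache line arithmetic can be sketched with a simplified userspace mock
  of that layout (assuming LP64 and 64-byte cache lines; the members are
  placeholders, not the real struct dst_entry):

  #include <stdio.h>
  #include <stddef.h>

  /* The 64-byte array stands in for the read-mostly members at the start
   * of dst_entry so that __refcnt lands at offset 64 as quoted above. */
  struct mock_dst {
  	char		read_mostly[64];	/* dev, ops, input, output, ... */
  	int		__refcnt;		/* offset 64 */
  	int		__use;
  	unsigned long	lastuse;
  	void		*lwtstate;		/* offset 80 on LP64 */
  };

  int main(void)
  {
  	printf("__refcnt in cache line %zu, lwtstate in cache line %zu\n",
  	       offsetof(struct mock_dst, __refcnt) / 64,
  	       offsetof(struct mock_dst, lwtstate) / 64);
  	return 0;
  }

  Both members report cache line 1 (bytes 64..127) in this mock.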

struct rtable embeds a struct dst_entry at offset 0. struct dst_entry has a
size of 112 bytes, which means that the struct members of rtable which
follow the dst member share the same cache line as dst_entry::__refcnt.
Especially

  rtable::rt_genid

is also read by the contexts which have a reference count acquired
already.
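
  In the same back-of-the-envelope terms, using only the numbers quoted
  above (a hedged sanity check, not kernel code):

  #include <assert.h>

  #define CACHE_LINE	64
  #define LINE(off)	((off) / CACHE_LINE)

  /* __refcnt at offset 64 and the first rtable member behind the 112-byte
   * embedded dst_entry both land in cache line 1 (bytes 64..127). */
  static_assert(LINE(64) == 1 && LINE(112) == 1, "same cache line");

  int main(void) { return 0; }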

When dst_entry::__refcnt is incremented or decremented via an atomic
operation, these read accesses stall. This was found when analysing the
memtier benchmark in 1:100 mode, which amplifies the problem extremely.

Move the rt[6i]_uncached[_list] members out of struct rtable and struct
rt6_info into struct dst_entry to provide padding, and move the lwtstate
member after them so it ends up in the same cache line as those members and
out of the __refcnt cache line.
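
  Extending the earlier mock with the rearranged layout (again LP64, 64-byte
  cache lines and placeholder members, not the real struct) shows lwtstate
  landing one cache line further:

  #include <stdio.h>
  #include <stddef.h>

  /* The two uncached-list members now sit between dev_tracker and lwtstate,
   * pushing lwtstate past byte 127 and off the __refcnt cache line. */
  struct mock_dst_fixed {
  	char		read_mostly[64];	/* dev, ops, input, output, ... */
  	int		__refcnt;		/* offset 64 */
  	int		__use;
  	unsigned long	lastuse;
  	void		*rcu_next, *rcu_func;	/* rcu_head */
  	short		error, __pad;
  	unsigned int	tclassid;
  	void		*dev_tracker;
  	void		*rt_uncached_next, *rt_uncached_prev;	/* list_head */
  	void		*rt_uncached_list;
  	void		*lwtstate;		/* offset 136 -> cache line 2 */
  };

  int main(void)
  {
  	printf("__refcnt in cache line %zu, lwtstate in cache line %zu\n",
  	       offsetof(struct mock_dst_fixed, __refcnt) / 64,
  	       offsetof(struct mock_dst_fixed, lwtstate) / 64);
  	return 0;
  }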

The resulting improvement depends on the micro-architecture and the number
of CPUs. It ranges from +20% to +120% with a localhost memtier/memcached
benchmark.

[ tglx: Rearrange struct ]

Signed-off-by: Wangyang Guo <wangyang.guo@intel.com>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Link: https://lore.kernel.org/r/20230323102800.042297517@linutronix.de


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent b133fffe
include/net/dst.h  +14 −1
@@ -69,15 +69,28 @@ struct dst_entry {
 #endif
 	int			__use;
 	unsigned long		lastuse;
-	struct lwtunnel_state   *lwtstate;
 	struct rcu_head		rcu_head;
 	short			error;
 	short			__pad;
 	__u32			tclassid;
 #ifndef CONFIG_64BIT
+	struct lwtunnel_state   *lwtstate;
 	atomic_t		__refcnt;	/* 32-bit offset 64 */
 #endif
 	netdevice_tracker	dev_tracker;
+
+	/*
+	 * Used by rtable and rt6_info. Moves lwtstate into the next cache
+	 * line on 64bit so that lwtstate does not cause false sharing with
+	 * __refcnt under contention of __refcnt. This also puts the
+	 * frequently accessed members of rtable and rt6_info out of the
+	 * __refcnt cache line.
+	 */
+	struct list_head	rt_uncached;
+	struct uncached_list	*rt_uncached_list;
+#ifdef CONFIG_64BIT
+	struct lwtunnel_state   *lwtstate;
+#endif
 };
 
 struct dst_metrics {
include/net/ip6_fib.h  +0 −3
@@ -217,9 +217,6 @@ struct rt6_info {
 	struct inet6_dev		*rt6i_idev;
 	u32				rt6i_flags;
 
-	struct list_head		rt6i_uncached;
-	struct uncached_list		*rt6i_uncached_list;
-
 	/* more non-fragment space at head required */
 	unsigned short			rt6i_nfheader_len;
 };
include/net/ip6_route.h  +1 −1
@@ -100,7 +100,7 @@ static inline struct dst_entry *ip6_route_output(struct net *net,
 static inline void ip6_rt_put_flags(struct rt6_info *rt, int flags)
 {
 	if (!(flags & RT6_LOOKUP_F_DST_NOREF) ||
-	    !list_empty(&rt->rt6i_uncached))
+	    !list_empty(&rt->dst.rt_uncached))
 		ip6_rt_put(rt);
 }
 
include/net/route.h  +0 −3
@@ -78,9 +78,6 @@ struct rtable {
 	/* Miscellaneous cached information */
 	u32			rt_mtu_locked:1,
 				rt_pmtu:31;
-
-	struct list_head	rt_uncached;
-	struct uncached_list	*rt_uncached_list;
 };
 
 static inline bool rt_is_input_route(const struct rtable *rt)
net/ipv4/route.c  +10 −10
@@ -1508,20 +1508,20 @@ void rt_add_uncached_list(struct rtable *rt)
 {
 	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
 
-	rt->rt_uncached_list = ul;
+	rt->dst.rt_uncached_list = ul;
 
 	spin_lock_bh(&ul->lock);
-	list_add_tail(&rt->rt_uncached, &ul->head);
+	list_add_tail(&rt->dst.rt_uncached, &ul->head);
 	spin_unlock_bh(&ul->lock);
 }
 
 void rt_del_uncached_list(struct rtable *rt)
 {
-	if (!list_empty(&rt->rt_uncached)) {
-		struct uncached_list *ul = rt->rt_uncached_list;
+	if (!list_empty(&rt->dst.rt_uncached)) {
+		struct uncached_list *ul = rt->dst.rt_uncached_list;
 
 		spin_lock_bh(&ul->lock);
-		list_del_init(&rt->rt_uncached);
+		list_del_init(&rt->dst.rt_uncached);
 		spin_unlock_bh(&ul->lock);
 	}
 }
@@ -1546,13 +1546,13 @@ void rt_flush_dev(struct net_device *dev)
 			continue;
 
 		spin_lock_bh(&ul->lock);
-		list_for_each_entry_safe(rt, safe, &ul->head, rt_uncached) {
+		list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
 			if (rt->dst.dev != dev)
 				continue;
 			rt->dst.dev = blackhole_netdev;
 			netdev_ref_replace(dev, blackhole_netdev,
 					   &rt->dst.dev_tracker, GFP_ATOMIC);
-			list_move(&rt->rt_uncached, &ul->quarantine);
+			list_move(&rt->dst.rt_uncached, &ul->quarantine);
 		}
 		spin_unlock_bh(&ul->lock);
 	}
@@ -1644,7 +1644,7 @@ struct rtable *rt_dst_alloc(struct net_device *dev,
 		rt->rt_uses_gateway = 0;
 		rt->rt_gw_family = 0;
 		rt->rt_gw4 = 0;
-		INIT_LIST_HEAD(&rt->rt_uncached);
+		INIT_LIST_HEAD(&rt->dst.rt_uncached);
 
 		rt->dst.output = ip_output;
 		if (flags & RTCF_LOCAL)
@@ -1675,7 +1675,7 @@ struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
 			new_rt->rt_gw4 = rt->rt_gw4;
 		else if (rt->rt_gw_family == AF_INET6)
 			new_rt->rt_gw6 = rt->rt_gw6;
-		INIT_LIST_HEAD(&new_rt->rt_uncached);
+		INIT_LIST_HEAD(&new_rt->dst.rt_uncached);
 
 		new_rt->dst.input = rt->dst.input;
 		new_rt->dst.output = rt->dst.output;
@@ -2859,7 +2859,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
 		else if (rt->rt_gw_family == AF_INET6)
 			rt->rt_gw6 = ort->rt_gw6;
 
-		INIT_LIST_HEAD(&rt->rt_uncached);
+		INIT_LIST_HEAD(&rt->dst.rt_uncached);
 	}
 
 	dst_release(dst_orig);