Commit fcb30073 authored by Jakub Kicinski
Browse files

Merge branch 'wireguard-fixes-for-5-12-rc1'

Jason Donenfeld says:

====================
wireguard fixes for 5.12-rc1

This series has a collection of fixes that have piled up for a little
while now, that I unfortunately didn't get a chance to send out earlier.

1) Removes unlikely() from IS_ERR(), since it's already implied.

2) Remove a bogus sparse annotation that hasn't been needed for years.

3) Additional test in the test suite for stressing parallel ndo_start_xmit.

4) Slight struct reordering in preparation for subsequent fix.

5) If skb->protocol is bogus, we no longer attempt to send icmp messages.

6) Massive memory usage fix, hit by larger deployments.

7) Fix typo in kconfig dependency logic.

(1) and (2) are tiny cleanups, and (3) is just a test, so if you're
trying to reduce churn, you could skip backporting these. But (4), (5), (6),
and (7) fix problems and should be applied to stable. IMO, it's probably
easiest to just apply them all to stable.
====================

Link: https://lore.kernel.org/r/20210222162549.3252778-1-Jason@zx2c4.com


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents fc0494ea bce24739
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -88,7 +88,7 @@ config WIREGUARD
	select CRYPTO_CURVE25519_X86 if X86 && 64BIT
	select ARM_CRYPTO if ARM
	select ARM64_CRYPTO if ARM64
	select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON
	select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
	select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
	select CRYPTO_POLY1305_ARM if ARM
	select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
+11 −10
Original line number Diff line number Diff line
@@ -138,7 +138,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
		else if (skb->protocol == htons(ETH_P_IPV6))
			net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
					    dev->name, &ipv6_hdr(skb)->daddr);
		goto err;
		goto err_icmp;
	}

	family = READ_ONCE(peer->endpoint.addr.sa_family);
@@ -157,7 +157,7 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
	} else {
		struct sk_buff *segs = skb_gso_segment(skb, 0);

		if (unlikely(IS_ERR(segs))) {
		if (IS_ERR(segs)) {
			ret = PTR_ERR(segs);
			goto err_peer;
		}
@@ -201,12 +201,13 @@ static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)

err_peer:
	wg_peer_put(peer);
err:
	++dev->stats.tx_errors;
err_icmp:
	if (skb->protocol == htons(ETH_P_IP))
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
	else if (skb->protocol == htons(ETH_P_IPV6))
		icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
err:
	++dev->stats.tx_errors;
	kfree_skb(skb);
	return ret;
}
@@ -234,8 +235,8 @@ static void wg_destruct(struct net_device *dev)
	destroy_workqueue(wg->handshake_receive_wq);
	destroy_workqueue(wg->handshake_send_wq);
	destroy_workqueue(wg->packet_crypt_wq);
	wg_packet_queue_free(&wg->decrypt_queue, true);
	wg_packet_queue_free(&wg->encrypt_queue, true);
	wg_packet_queue_free(&wg->decrypt_queue);
	wg_packet_queue_free(&wg->encrypt_queue);
	rcu_barrier(); /* Wait for all the peers to be actually freed. */
	wg_ratelimiter_uninit();
	memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
@@ -337,12 +338,12 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
		goto err_destroy_handshake_send;

	ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
				   true, MAX_QUEUED_PACKETS);
				   MAX_QUEUED_PACKETS);
	if (ret < 0)
		goto err_destroy_packet_crypt;

	ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
				   true, MAX_QUEUED_PACKETS);
				   MAX_QUEUED_PACKETS);
	if (ret < 0)
		goto err_free_encrypt_queue;

@@ -367,9 +368,9 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
err_uninit_ratelimiter:
	wg_ratelimiter_uninit();
err_free_decrypt_queue:
	wg_packet_queue_free(&wg->decrypt_queue, true);
	wg_packet_queue_free(&wg->decrypt_queue);
err_free_encrypt_queue:
	wg_packet_queue_free(&wg->encrypt_queue, true);
	wg_packet_queue_free(&wg->encrypt_queue);
err_destroy_packet_crypt:
	destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
+8 −7
Original line number Diff line number Diff line
@@ -27,13 +27,14 @@ struct multicore_worker {

struct crypt_queue {
	struct ptr_ring ring;
	union {
		struct {
	struct multicore_worker __percpu *worker;
	int last_cpu;
};
		struct work_struct work;
	};

struct prev_queue {
	struct sk_buff *head, *tail, *peeked;
	struct { struct sk_buff *next, *prev; } empty; // Match first 2 members of struct sk_buff.
	atomic_t count;
};

struct wg_device {
+9 −19
Original line number Diff line number Diff line
@@ -32,27 +32,22 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
	peer = kzalloc(sizeof(*peer), GFP_KERNEL);
	if (unlikely(!peer))
		return ERR_PTR(ret);
	peer->device = wg;
	if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
		goto err;

	peer->device = wg;
	wg_noise_handshake_init(&peer->handshake, &wg->static_identity,
				public_key, preshared_key, peer);
	if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
		goto err_1;
	if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false,
				 MAX_QUEUED_PACKETS))
		goto err_2;
	if (wg_packet_queue_init(&peer->rx_queue, NULL, false,
				 MAX_QUEUED_PACKETS))
		goto err_3;

	peer->internal_id = atomic64_inc_return(&peer_counter);
	peer->serial_work_cpu = nr_cpumask_bits;
	wg_cookie_init(&peer->latest_cookie);
	wg_timers_init(peer);
	wg_cookie_checker_precompute_peer_keys(peer);
	spin_lock_init(&peer->keypairs.keypair_update_lock);
	INIT_WORK(&peer->transmit_handshake_work,
		  wg_packet_handshake_send_worker);
	INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker);
	INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker);
	wg_prev_queue_init(&peer->tx_queue);
	wg_prev_queue_init(&peer->rx_queue);
	rwlock_init(&peer->endpoint_lock);
	kref_init(&peer->refcount);
	skb_queue_head_init(&peer->staged_packet_queue);
@@ -68,11 +63,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
	pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id);
	return peer;

err_3:
	wg_packet_queue_free(&peer->tx_queue, false);
err_2:
	dst_cache_destroy(&peer->endpoint_cache);
err_1:
err:
	kfree(peer);
	return ERR_PTR(ret);
}
@@ -197,8 +188,7 @@ static void rcu_release(struct rcu_head *rcu)
	struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu);

	dst_cache_destroy(&peer->endpoint_cache);
	wg_packet_queue_free(&peer->rx_queue, false);
	wg_packet_queue_free(&peer->tx_queue, false);
	WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue));

	/* The final zeroing takes care of clearing any remaining handshake key
	 * material and other potentially sensitive information.
+4 −4
Original line number Diff line number Diff line
@@ -36,16 +36,17 @@ struct endpoint {

struct wg_peer {
	struct wg_device *device;
	struct crypt_queue tx_queue, rx_queue;
	struct prev_queue tx_queue, rx_queue;
	struct sk_buff_head staged_packet_queue;
	int serial_work_cpu;
	bool is_dead;
	struct noise_keypairs keypairs;
	struct endpoint endpoint;
	struct dst_cache endpoint_cache;
	rwlock_t endpoint_lock;
	struct noise_handshake handshake;
	atomic64_t last_sent_handshake;
	struct work_struct transmit_handshake_work, clear_peer_work;
	struct work_struct transmit_handshake_work, clear_peer_work, transmit_packet_work;
	struct cookie latest_cookie;
	struct hlist_node pubkey_hash;
	u64 rx_bytes, tx_bytes;
@@ -61,9 +62,8 @@ struct wg_peer {
	struct rcu_head rcu;
	struct list_head peer_list;
	struct list_head allowedips_list;
	u64 internal_id;
	struct napi_struct napi;
	bool is_dead;
	u64 internal_id;
};

struct wg_peer *wg_peer_create(struct wg_device *wg,
Loading