Commit c905dee6 authored by Kuniyuki Iwashima, committed by Daniel Borkmann

tcp: Migrate TCP_NEW_SYN_RECV requests at retransmitting SYN+ACKs.



As with the preceding patch, this patch changes reqsk_timer_handler() to
call reuseport_migrate_sock() and inet_reqsk_clone() to migrate in-flight
requests when retransmitting SYN+ACKs. If we can select a new listener and
clone the request, we resume setting the SYN+ACK timer for the new req. If
we can set the timer, we call inet_ehash_insert() to unhash the old req and
put the new req into the ehash table.
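
A minimal userspace sketch of the setup this path serves (not part of the patch):
two listeners in the same SO_REUSEPORT group, with request migration enabled via
the net.ipv4.tcp_migrate_req sysctl added earlier in this series. The port number
and the act of closing one listener mid-handshake are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>

static int reuseport_listener(uint16_t port)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(port),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0 ||
	    setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) ||
	    bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ||
	    listen(fd, 128)) {
		perror("listener");
		exit(1);
	}
	return fd;
}

int main(void)
{
	/* Assumes: sysctl -w net.ipv4.tcp_migrate_req=1 */
	int a = reuseport_listener(8080);
	int b = reuseport_listener(8080);

	/* Requests whose SYN+ACKs are still being retransmitted when 'a'
	 * goes away used to be aborted; with migration enabled, the timer
	 * handler below moves them over to 'b' at the next retransmission.
	 */
	close(a);
	pause();	/* keep 'b' serving */
	close(b);
	return 0;
}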

The noteworthy point here is that, by unhashing the old req, another CPU
processing it may lose the "own_req" race in tcp_v[46]_syn_recv_sock() and
drop the final ACK packet. However, the new timer will recover from this
situation by retransmitting the SYN+ACK.
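
For orientation before the diff, here is a condensed, annotated sketch of the flow
the two paragraphs above describe. NULL checks, queue accounting, and refcount
handling are trimmed, so this is not a standalone compilable unit; the exact code
is in the hunks below.

	/* reqsk_timer_handler(): the listener is gone, so migrate the req. */
	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
		nreq = inet_reqsk_clone(req, nsk);
		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
		req = nreq;		/* retry accounting now runs against nreq */
		sk_listener = nsk;
	}

	/* ... usual expire/resend bookkeeping ... */

	mod_timer(&req->rsk_timer, jiffies + timeo);	/* rearm for nreq */
	if (nreq && inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
		/* nreq is hashed and oreq unhashed: a CPU still handling the
		 * final ACK against oreq loses the "own_req" race and drops
		 * that ACK, but nreq's rearmed timer retransmits the SYN+ACK
		 * and the client's next ACK completes the handshake on nreq.
		 */
		reqsk_put(oreq);
	}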

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Link: https://lore.kernel.org/bpf/20210612123224.12525-7-kuniyu@amazon.co.jp
parent 54b92e84
+69 −6
@@ -734,10 +734,22 @@ static struct request_sock *inet_reqsk_clone(struct request_sock *req,
 	return nreq;
 }
 
+static void reqsk_queue_migrated(struct request_sock_queue *queue,
+				 const struct request_sock *req)
+{
+	if (req->num_timeout == 0)
+		atomic_inc(&queue->young);
+	atomic_inc(&queue->qlen);
+}
+
 static void reqsk_migrate_reset(struct request_sock *req)
 {
+	req->saved_syn = NULL;
 #if IS_ENABLED(CONFIG_IPV6)
 	inet_rsk(req)->ipv6_opt = NULL;
+	inet_rsk(req)->pktopts = NULL;
+#else
+	inet_rsk(req)->ireq_opt = NULL;
 #endif
 }
 
@@ -781,15 +793,39 @@ EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);
 static void reqsk_timer_handler(struct timer_list *t)
 {
 	struct request_sock *req = from_timer(req, t, rsk_timer);
+	struct request_sock *nreq = NULL, *oreq = req;
 	struct sock *sk_listener = req->rsk_listener;
-	struct net *net = sock_net(sk_listener);
-	struct inet_connection_sock *icsk = inet_csk(sk_listener);
-	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
+	struct inet_connection_sock *icsk;
+	struct request_sock_queue *queue;
+	struct net *net;
 	int max_syn_ack_retries, qlen, expire = 0, resend = 0;
 
-	if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
-		goto drop;
+	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
+		struct sock *nsk;
+
+		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
+		if (!nsk)
+			goto drop;
+
+		nreq = inet_reqsk_clone(req, nsk);
+		if (!nreq)
+			goto drop;
+
+		/* The new timer for the cloned req can decrease the 2
+		 * by calling inet_csk_reqsk_queue_drop_and_put(), so
+		 * hold another count to prevent use-after-free and
+		 * call reqsk_put() just before return.
+		 */
+		refcount_set(&nreq->rsk_refcnt, 2 + 1);
+		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
+		reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);
+
+		req = nreq;
+		sk_listener = nsk;
+	}
 
+	icsk = inet_csk(sk_listener);
+	net = sock_net(sk_listener);
 	max_syn_ack_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
 	/* Normally all the openreqs are young and become mature
 	 * (i.e. converted to established socket) for first timeout.
@@ -808,6 +844,7 @@ static void reqsk_timer_handler(struct timer_list *t)
 	 * embrions; and abort old ones without pity, if old
 	 * ones are about to clog our table.
 	 */
+	queue = &icsk->icsk_accept_queue;
 	qlen = reqsk_queue_len(queue);
 	if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
 		int young = reqsk_queue_len_young(queue) << 1;
@@ -832,10 +869,36 @@ static void reqsk_timer_handler(struct timer_list *t)
 			atomic_dec(&queue->young);
 		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
 		mod_timer(&req->rsk_timer, jiffies + timeo);
+
+		if (!nreq)
+			return;
+
+		if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
+			/* delete timer */
+			inet_csk_reqsk_queue_drop(sk_listener, nreq);
+			goto drop;
+		}
+
+		reqsk_migrate_reset(oreq);
+		reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
+		reqsk_put(oreq);
+
+		reqsk_put(nreq);
 		return;
 	}
 
 drop:
-	inet_csk_reqsk_queue_drop_and_put(sk_listener, req);
+	/* Even if we can clone the req, we may need not retransmit any more
+	 * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
+	 * CPU may win the "own_req" race so that inet_ehash_insert() fails.
+	 */
+	if (nreq) {
+		reqsk_migrate_reset(nreq);
+		reqsk_queue_removed(queue, nreq);
+		__reqsk_free(nreq);
+	}
+
+	inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
 }
 
 static void reqsk_queue_hash_req(struct request_sock *req,