Commit 5903123f authored by Akhmat Karakotov, committed by David S. Miller
Browse files

tcp: Use BPF timeout setting for SYN ACK RTO



When setting the RTO through a BPF program, some SYN ACK packets were
unaffected and continued to use the TCP_TIMEOUT_INIT constant. This patch
adds a timeout field to struct request_sock. The field is initialized with
TCP_TIMEOUT_INIT and is reassigned through BPF using the tcp_timeout_init
call. SYN ACK retransmits now use the newly added timeout field.

Signed-off-by: Akhmat Karakotov <hmukos@yandex-team.ru>
Acked-by: Martin KaFai Lau <kafai@fb.com>

v2:
	- Add timeout option to struct request_sock. Do not call
	  tcp_timeout_init on every syn ack retransmit.

v3:
	- Use unsigned long for min. Bound tcp_timeout_init to TCP_RTO_MAX.

v4:
	- Refactor duplicate code by adding reqsk_timeout function.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0b6b0d31
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -285,6 +285,14 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);

/* Compute the next retransmit timeout for a request socket: the base
 * req->timeout (TCP_TIMEOUT_INIT, or a BPF-supplied value via
 * tcp_timeout_init) doubled once per retransmit already performed,
 * clamped to max_timeout (callers pass TCP_RTO_MAX).
 */
static inline unsigned long
reqsk_timeout(struct request_sock *req, unsigned long max_timeout)
{
	/* Shift in u64 so the exponential backoff cannot overflow
	 * before the clamp below is applied.
	 */
	u64 timeout = (u64)req->timeout << req->num_timeout;

	return (unsigned long)min_t(u64, timeout, max_timeout);
}

static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
{
	/* The below has to be done to allow calling inet_csk_destroy_sock */
+2 −0
Original line number Diff line number Diff line
@@ -70,6 +70,7 @@ struct request_sock {
	struct saved_syn		*saved_syn;
	u32				secid;
	u32				peer_secid;
	u32				timeout;
};

static inline struct request_sock *inet_reqsk(const struct sock *sk)
@@ -104,6 +105,7 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
	sk_node_init(&req_to_sk(req)->sk_node);
	sk_tx_queue_clear(req_to_sk(req));
	req->saved_syn = NULL;
	req->timeout = 0;
	req->num_timeout = 0;
	req->num_retrans = 0;
	req->sk = NULL;
+1 −1
Original line number Diff line number Diff line
@@ -2358,7 +2358,7 @@ static inline u32 tcp_timeout_init(struct sock *sk)

	if (timeout <= 0)
		timeout = TCP_TIMEOUT_INIT;
	return timeout;
	return min_t(int, timeout, TCP_RTO_MAX);
}

static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
+1 −4
Original line number Diff line number Diff line
@@ -866,12 +866,9 @@ static void reqsk_timer_handler(struct timer_list *t)
	    (!resend ||
	     !inet_rtx_syn_ack(sk_listener, req) ||
	     inet_rsk(req)->acked)) {
		unsigned long timeo;

		if (req->num_timeout++ == 0)
			atomic_dec(&queue->young);
		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
		mod_timer(&req->rsk_timer, jiffies + timeo);
		mod_timer(&req->rsk_timer, jiffies + reqsk_timeout(req, TCP_RTO_MAX));

		if (!nreq)
			return;
+5 −3
Original line number Diff line number Diff line
@@ -6723,6 +6723,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
		ireq->ireq_state = TCP_NEW_SYN_RECV;
		write_pnet(&ireq->ireq_net, sock_net(sk_listener));
		ireq->ireq_family = sk_listener->sk_family;
		req->timeout = TCP_TIMEOUT_INIT;
	}

	return req;
@@ -6939,9 +6940,10 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
		sock_put(fastopen_sk);
	} else {
		tcp_rsk(req)->tfo_listener = false;
		if (!want_cookie)
			inet_csk_reqsk_queue_hash_add(sk, req,
				tcp_timeout_init((struct sock *)req));
		if (!want_cookie) {
			req->timeout = tcp_timeout_init((struct sock *)req);
			inet_csk_reqsk_queue_hash_add(sk, req, req->timeout);
		}
		af_ops->send_synack(sk, dst, &fl, req, &foc,
				    !want_cookie ? TCP_SYNACK_NORMAL :
						   TCP_SYNACK_COOKIE,
Loading