include/linux/tcp.h  +9 −0

@@ -445,4 +445,13 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp)
 
 struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk);
 
+static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
+{
+	/* We use READ_ONCE() here because socket might not be locked.
+	 * This happens for listeners.
+	 */
+	u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
+
+	return (user_mss && user_mss < mss) ? user_mss : mss;
+}
 #endif	/* _LINUX_TCP_H */
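For context, the user_mss value the new helper reads is the one an application configures with the TCP_MAXSEG socket option (the setsockopt path stores it in tp->rx_opt.user_mss). The following user-space sketch is not part of the patch; it only illustrates where that value comes from, with an arbitrary example MSS of 1200 and port 8080:

/* Sketch: set TCP_MAXSEG on a listening socket so the kernel records a
 * user_mss, which tcp_mss_clamp() then applies when building SYN/ACKs
 * for incoming connections.  Values are illustrative only.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int mss = 1200;			/* example clamp, below a typical 1460 */
	struct sockaddr_in addr;

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Recorded by the kernel as tp->rx_opt.user_mss */
	if (setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss)) < 0)
		perror("setsockopt(TCP_MAXSEG)");

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(8080);

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 16) < 0) {
		perror("bind/listen");
		close(fd);
		return 1;
	}
	/* Sockets accepted from this listener will advertise an MSS of at
	 * most 1200, since tcp_mss_clamp() caps dst_metric_advmss().
	 */
	close(fd);
	return 0;
}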
net/ipv4/tcp_ipv4.c  +1 −4

@@ -1324,10 +1324,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 
 	tcp_ca_openreq_child(newsk, dst);
 
 	tcp_sync_mss(newsk, dst_mtu(dst));
-	newtp->advmss = dst_metric_advmss(dst);
-	if (tcp_sk(sk)->rx_opt.user_mss &&
-	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
-		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
+	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
 
 	tcp_initialize_rcv_mss(newsk);
net/ipv4/tcp_minisocks.c  +2 −5

@@ -360,15 +360,12 @@ void tcp_openreq_init_rwin(struct request_sock *req,
 {
 	struct inet_request_sock *ireq = inet_rsk(req);
 	const struct tcp_sock *tp = tcp_sk(sk_listener);
-	u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
 	int full_space = tcp_full_space(sk_listener);
-	int mss = dst_metric_advmss(dst);
 	u32 window_clamp;
 	__u8 rcv_wscale;
+	int mss;
 
-	if (user_mss && user_mss < mss)
-		mss = user_mss;
-
+	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
 	window_clamp = READ_ONCE(tp->window_clamp);
 	/* Set this up on the first call only */
 	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
net/ipv4/tcp_output.c  +4 −10

@@ -3062,7 +3062,6 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 	struct sk_buff *skb;
 	int tcp_header_size;
 	struct tcphdr *th;
-	u16 user_mss;
 	int mss;
 
 	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);

@@ -3092,10 +3091,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 	}
 	skb_dst_set(skb, dst);
 
-	mss = dst_metric_advmss(dst);
-	user_mss = READ_ONCE(tp->rx_opt.user_mss);
-	if (user_mss && user_mss < mss)
-		mss = user_mss;
+	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
 
 	memset(&opts, 0, sizeof(opts));
 #ifdef CONFIG_SYN_COOKIES

@@ -3201,9 +3197,7 @@ static void tcp_connect_init(struct sock *sk)
 
 	if (!tp->window_clamp)
 		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
-	tp->advmss = dst_metric_advmss(dst);
-	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
-		tp->advmss = tp->rx_opt.user_mss;
+	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
 
 	tcp_initialize_rcv_mss(sk);

@@ -3280,8 +3274,8 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
	 * user-MSS. Reserve maximum option space for middleboxes that add
	 * private TCP options. The cost is reduced data space in SYN :(
	 */
-	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
-		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
+	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
+
 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
 		MAX_TCP_OPTION_SPACE;
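To make the "reduced data space in SYN" cost mentioned in the tcp_send_syn_data() comment concrete: MAX_TCP_OPTION_SPACE is 40 bytes in include/net/tcp.h, so whatever MSS survives the clamp loses 40 bytes of Fast Open payload room. A rough stand-alone sketch of that arithmetic, using illustrative values (a 1200-byte user MSS against a typical 1460-byte path MSS), not live socket state:

/* Sketch of the clamp-then-reserve arithmetic from tcp_send_syn_data();
 * numbers are examples only.
 */
#include <stdio.h>

#define MAX_TCP_OPTION_SPACE	40	/* as defined in include/net/tcp.h */

static unsigned short mss_clamp(unsigned short user_mss, unsigned short mss)
{
	/* Same rule as tcp_mss_clamp(): a non-zero user_mss can only
	 * lower the value, never raise it.
	 */
	return (user_mss && user_mss < mss) ? user_mss : mss;
}

int main(void)
{
	unsigned short user_mss = 1200;	/* e.g. set via TCP_MAXSEG */
	unsigned short path_mss = 1460;	/* typical Ethernet-derived MSS */
	int space;

	path_mss = mss_clamp(user_mss, path_mss);
	/* Reserve maximum option space for middleboxes, as the comment in
	 * tcp_send_syn_data() explains; the remainder is SYN payload room.
	 */
	space = path_mss - MAX_TCP_OPTION_SPACE;
	printf("SYN data space: %d bytes\n", space);	/* 1160 here */
	return 0;
}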
net/ipv6/tcp_ipv6.c  +1 −4

@@ -1147,10 +1147,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 
 	tcp_ca_openreq_child(newsk, dst);
 
 	tcp_sync_mss(newsk, dst_mtu(dst));
-	newtp->advmss = dst_metric_advmss(dst);
-	if (tcp_sk(sk)->rx_opt.user_mss &&
-	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
-		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
+	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
 
 	tcp_initialize_rcv_mss(newsk);