Commit 701b9519 authored by David S. Miller
Browse files

Merge branch 'tcp-tx-side-cleanups'



Eric Dumazet says:

====================
tcp: tx side cleanups

We no longer need to set skb->reserved_tailroom because
TCP sendmsg() does not put payload in skb->head anymore.

Also do some cleanups around skb->ip_summed/csum,
and TCP_SKB_CB(skb)->sacked for fresh skbs.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 911e3a46 8b7d8c2b
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -311,7 +311,7 @@ void tcp_shutdown(struct sock *sk, int how);
int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb);
void tcp_remove_empty_skb(struct sock *sk);
int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
+7 −22
Original line number Diff line number Diff line
@@ -658,10 +658,8 @@ void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum    = 0;
	tcb->seq     = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	tcb->sacked  = 0;
	__skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_wmem_queued_add(sk, skb->truesize);
@@ -876,11 +874,7 @@ struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
		}
		if (likely(mem_scheduled)) {
			skb_reserve(skb, MAX_TCP_HEADER);
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb->reserved_tailroom = skb->end - skb->tail - size;
			skb->ip_summed = CHECKSUM_PARTIAL;
			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
			return skb;
		}
@@ -933,8 +927,10 @@ int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
 * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
 * users.
 */
void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
void tcp_remove_empty_skb(struct sock *sk)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
		tcp_unlink_write_queue(skb, sk);
		if (tcp_write_queue_empty(sk))
@@ -996,7 +992,6 @@ static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
	skb->truesize += copy;
	sk_wmem_queued_add(sk, copy);
	sk_mem_charge(sk, copy);
	skb->ip_summed = CHECKSUM_PARTIAL;
	WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
	TCP_SKB_CB(skb)->end_seq += copy;
	tcp_skb_pcount_set(skb, 0);
@@ -1087,7 +1082,7 @@ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
	return copied;

do_error:
	tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
	tcp_remove_empty_skb(sk);
	if (copied)
		goto out;
out_err:
@@ -1292,7 +1287,6 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
				goto wait_for_space;

			process_backlog++;
			skb->ip_summed = CHECKSUM_PARTIAL;

			tcp_skb_entail(sk, skb);
			copy = size_goal;
@@ -1309,14 +1303,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
		if (copy > msg_data_left(msg))
			copy = msg_data_left(msg);

		/* Where to copy to? */
		if (skb_availroom(skb) > 0 && !zc) {
			/* We have some space in skb head. Superb! */
			copy = min_t(int, copy, skb_availroom(skb));
			err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
			if (err)
				goto do_fault;
		} else if (!zc) {
		if (!zc) {
			bool merge = true;
			int i = skb_shinfo(skb)->nr_frags;
			struct page_frag *pfrag = sk_page_frag(sk);
@@ -1415,9 +1402,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
	return copied + copied_syn;

do_error:
	skb = tcp_write_queue_tail(sk);
do_fault:
	tcp_remove_empty_skb(sk, skb);
	tcp_remove_empty_skb(sk);

	if (copied + copied_syn)
		goto out;
+3 −19
Original line number Diff line number Diff line
@@ -394,7 +394,6 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
	skb->ip_summed = CHECKSUM_PARTIAL;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	tcp_skb_pcount_set(skb, 1);

@@ -1590,8 +1589,6 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,

	skb_split(skb, buff, len);

	buff->ip_summed = CHECKSUM_PARTIAL;

	buff->tstamp = skb->tstamp;
	tcp_fragment_tstamp(skb, buff);

@@ -1676,7 +1673,6 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
	delta_truesize = __pskb_trim_head(skb, len);

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_PARTIAL;

	if (delta_truesize) {
		skb->truesize	   -= delta_truesize;
@@ -2142,12 +2138,8 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
	TCP_SKB_CB(buff)->tcp_flags = flags;

	/* This packet was never sent out yet, so no SACK bits. */
	TCP_SKB_CB(buff)->sacked = 0;

	tcp_skb_fragment_eor(skb, buff);

	buff->ip_summed = CHECKSUM_PARTIAL;
	skb_split(skb, buff, len);
	tcp_fragment_tstamp(skb, buff);

@@ -2401,9 +2393,6 @@ static int tcp_mtu_probe(struct sock *sk)
	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
	TCP_SKB_CB(nskb)->sacked = 0;
	nskb->csum = 0;
	nskb->ip_summed = CHECKSUM_PARTIAL;

	tcp_insert_write_queue_before(nskb, skb, sk);
	tcp_highest_sack_replace(sk, skb, nskb);
@@ -3045,13 +3034,9 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)

	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);

	if (next_skb_size) {
		if (next_skb_size <= skb_availroom(skb))
			skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
				      next_skb_size);
		else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size))
	if (next_skb_size && !tcp_skb_shift(skb, next_skb, 1, next_skb_size))
		return false;
	}

	tcp_highest_sack_replace(sk, next_skb, skb);

	/* Update sequence range on original skb. */
@@ -3757,7 +3742,6 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
	syn_data = tcp_stream_alloc_skb(sk, space, sk->sk_allocation, false);
	if (!syn_data)
		goto fallback;
	syn_data->ip_summed = CHECKSUM_PARTIAL;
	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
	if (space) {
		int copied = copy_from_iter(skb_put(syn_data, space), space,
+0 −1
Original line number Diff line number Diff line
@@ -969,7 +969,6 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

+3 −4
Original line number Diff line number Diff line
@@ -1174,7 +1174,7 @@ static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp)
	if (likely(skb)) {
		if (likely(__mptcp_add_ext(skb, gfp))) {
			skb_reserve(skb, MAX_TCP_HEADER);
			skb->reserved_tailroom = skb->end - skb->tail;
			skb->ip_summed = CHECKSUM_PARTIAL;
			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
			return skb;
		}
@@ -1291,7 +1291,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
		u64 snd_una = READ_ONCE(msk->snd_una);

		if (snd_una != msk->snd_nxt) {
			tcp_remove_empty_skb(ssk, tcp_write_queue_tail(ssk));
			tcp_remove_empty_skb(ssk);
			return 0;
		}

@@ -1307,7 +1307,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,

	copy = min_t(size_t, copy, info->limit - info->sent);
	if (!sk_wmem_schedule(ssk, copy)) {
		tcp_remove_empty_skb(ssk, tcp_write_queue_tail(ssk));
		tcp_remove_empty_skb(ssk);
		return -ENOMEM;
	}

@@ -1323,7 +1323,6 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
	skb->truesize += copy;
	sk_wmem_queued_add(ssk, copy);
	sk_mem_charge(ssk, copy);
	skb->ip_summed = CHECKSUM_PARTIAL;
	WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy);
	TCP_SKB_CB(skb)->end_seq += copy;
	tcp_skb_pcount_set(skb, 0);