Unverified commit 2dfb1d54 authored by openeuler-ci-bot, committed by Gitee

!11209 LTS patches round

Merge Pull Request from: @ci-robot 
 
PR sync from: Zhengchao Shao <shaozhengchao@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/WHOKK2FW6RBJKTYESYHJUBSGA3PDZVH2/ 
LTS patches round.

Neal Cardwell (1):
  UPSTREAM: tcp: fix DSACK undo in fast recovery to call
    tcp_try_to_open()

Yousuk Seung (1):
  tcp: add ece_ack flag to reno sack functions

Yuchung Cheng (1):
  net: tcp better handling of reordering then loss cases

zhang kai (1):
  tcp: tcp_mark_head_lost is only valid for sack-tcp
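
For orientation, the series threads an ECE indication ("ece_ack") into the
Reno SACK helpers, restricts tcp_mark_head_lost() to SACK connections,
re-enters recovery when loss shows up after reordering was detected, and
makes a DSACK undo in fast recovery call tcp_try_to_open(). The sketch
below is illustrative only and not part of the patches: seq_after() and
every value in it are invented stand-ins, showing the wrap-safe sequence
comparison that tcp_force_fast_retransmit() builds on.

/*
 * Minimal demo of the after()-style comparison: casting the unsigned
 * 32-bit difference to a signed type keeps ordering correct across the
 * 2^32 sequence-number wrap.
 */
#include <stdint.h>
#include <stdio.h>

static int seq_after(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) > 0;
}

int main(void)
{
	uint32_t snd_una = 4294967000u;	/* just below the 2^32 wrap */
	uint32_t highest_sack = 5000u;	/* already wrapped past zero */
	uint32_t reordering = 3, mss_cache = 1460;

	/* Same shape as: after(tcp_highest_sack_seq(tp),
	 *	tp->snd_una + tp->reordering * tp->mss_cache) */
	if (seq_after(highest_sack, snd_una + reordering * mss_cache))
		printf("SACKed data far enough ahead: force fast retransmit\n");
	else
		printf("not enough SACKed data above snd_una\n");
	return 0;
}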


-- 
2.34.1
 
https://gitee.com/openeuler/kernel/issues/IAMPH5 
 
Link: https://gitee.com/openeuler/kernel/pulls/11209

 

Reviewed-by: Liu YongQiang <liuyongqiang13@huawei.com>
Reviewed-by: Yue Haibing <yuehaibing@huawei.com>
Signed-off-by: Zhang Changzhong <zhangchangzhong@huawei.com>
parents 9e6205e0 70aaabd5
net/ipv4/tcp_input.c: +44 −53

--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1884,7 +1884,7 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 
 /* Emulate SACKs for SACKless connection: account for a new dupack. */
 
-static void tcp_add_reno_sack(struct sock *sk, int num_dupack)
+static void tcp_add_reno_sack(struct sock *sk, int num_dupack, bool ece_ack)
 {
 	if (num_dupack) {
 		struct tcp_sock *tp = tcp_sk(sk);
@@ -1902,7 +1902,7 @@ static void tcp_add_reno_sack(struct sock *sk, int num_dupack)
 
 /* Account for ACK, ACKing some data in Reno Recovery phase. */
 
-static void tcp_remove_reno_sacks(struct sock *sk, int acked)
+static void tcp_remove_reno_sacks(struct sock *sk, int acked, bool ece_ack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2189,8 +2189,7 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
 }
 
 /* Detect loss in event "A" above by marking head of queue up as lost.
- * For non-SACK(Reno) senders, the first "packets" number of segments
- * are considered lost. For RFC3517 SACK, a segment is considered lost if it
+ * For RFC3517 SACK, a segment is considered lost if it
  * has at least tp->reordering SACKed seqments above it; "packets" refers to
  * the maximum SACKed segments to pass before reaching this limit.
  */
@@ -2198,10 +2197,9 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
-	int cnt, oldcnt, lost;
-	unsigned int mss;
+	int cnt;
 	/* Use SACK to deduce losses of new sequences sent during recovery */
-	const u32 loss_high = tcp_is_sack(tp) ?  tp->snd_nxt : tp->high_seq;
+	const u32 loss_high = tp->snd_nxt;
 
 	WARN_ON(packets > tp->packets_out);
 	skb = tp->lost_skb_hint;
@@ -2224,27 +2222,12 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 		if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
 			break;
 
-		oldcnt = cnt;
-		if (tcp_is_reno(tp) ||
-		    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
+		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
 			cnt += tcp_skb_pcount(skb);
 
-		if (cnt > packets) {
-			if (tcp_is_sack(tp) ||
-			    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
-			    (oldcnt >= packets))
+		if (cnt > packets)
 			break;
 
-			mss = tcp_skb_mss(skb);
-			/* If needed, chop off the prefix to mark as lost. */
-			lost = (packets - oldcnt) * mss;
-			if (lost < skb->len &&
-			    tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
-					 lost, mss, GFP_ATOMIC) < 0)
-				break;
-			cnt = packets;
-		}
-
 		tcp_skb_mark_lost(tp, skb);
 
 		if (mark_head)
@@ -2733,15 +2716,24 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
 		 * delivered. Lower inflight to clock out (re)tranmissions.
 		 */
 		if (after(tp->snd_nxt, tp->high_seq) && num_dupack)
-			tcp_add_reno_sack(sk, num_dupack);
+			tcp_add_reno_sack(sk, num_dupack, flag & FLAG_ECE);
 		else if (flag & FLAG_SND_UNA_ADVANCED)
 			tcp_reset_reno_sack(tp);
 	}
 	*rexmit = REXMIT_LOST;
 }
 
+static bool tcp_force_fast_retransmit(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	return after(tcp_highest_sack_seq(tp),
+		     tp->snd_una + tp->reordering * tp->mss_cache);
+}
+
 /* Undo during fast recovery after partial ACK. */
-static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
+static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una,
+				 bool *do_lost)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2766,7 +2758,9 @@ static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
 		tcp_undo_cwnd_reduction(sk, true);
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 		tcp_try_keep_open(sk);
-		return true;
+	} else {
+		/* Partial ACK arrived. Force fast retransmit. */
+		*do_lost = tcp_force_fast_retransmit(sk);
 	}
 	return false;
 }
@@ -2790,14 +2784,6 @@ static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
 	}
 }
 
-static bool tcp_force_fast_retransmit(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	return after(tcp_highest_sack_seq(tp),
-		     tp->snd_una + tp->reordering * tp->mss_cache);
-}
-
 /* Process an event, which can update packets-in-flight not trivially.
  * Main goal of this function is to calculate new estimate for left_out,
  * taking into account both packets sitting in receiver's buffer and
@@ -2816,6 +2802,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int fast_rexmit = 0, flag = *ack_flag;
+	bool ece_ack = flag & FLAG_ECE;
 	bool do_lost = num_dupack || ((flag & FLAG_DATA_SACKED) &&
 				      tcp_force_fast_retransmit(sk));
 
@@ -2824,7 +2811,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 
 	/* Now state machine starts.
 	 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
-	if (flag & FLAG_ECE)
+	if (ece_ack)
 		tp->prior_ssthresh = 0;
 
 	/* B. In all the states check for reneging SACKs. */
@@ -2865,19 +2852,22 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 	case TCP_CA_Recovery:
 		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
 			if (tcp_is_reno(tp))
-				tcp_add_reno_sack(sk, num_dupack);
-		} else {
-			if (tcp_try_undo_partial(sk, prior_snd_una))
+				tcp_add_reno_sack(sk, num_dupack, ece_ack);
+		} else if (tcp_try_undo_partial(sk, prior_snd_una, &do_lost))
 			return;
-			/* Partial ACK arrived. Force fast retransmit. */
-			do_lost = tcp_is_reno(tp) ||
-				  tcp_force_fast_retransmit(sk);
-		}
-		if (tcp_try_undo_dsack(sk)) {
-			tcp_try_keep_open(sk);
+
+		if (tcp_try_undo_dsack(sk))
+			tcp_try_to_open(sk, flag);
+
+		tcp_identify_packet_loss(sk, ack_flag);
+		if (icsk->icsk_ca_state != TCP_CA_Recovery) {
+			if (!tcp_time_to_recover(sk, flag))
 				return;
+			/* Undo reverts the recovery state. If loss is evident,
+			 * starts a new recovery (e.g. reordering then loss);
+			 */
+			tcp_enter_recovery(sk, ece_ack);
 		}
-		tcp_identify_packet_loss(sk, ack_flag);
 		break;
 	case TCP_CA_Loss:
 		tcp_process_loss(sk, flag, num_dupack, rexmit);
@@ -2891,7 +2881,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 		if (tcp_is_reno(tp)) {
 			if (flag & FLAG_SND_UNA_ADVANCED)
 				tcp_reset_reno_sack(tp);
-			tcp_add_reno_sack(sk, num_dupack);
+			tcp_add_reno_sack(sk, num_dupack, ece_ack);
 		}
 
 		if (icsk->icsk_ca_state <= TCP_CA_Disorder)
@@ -2915,7 +2905,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 		}
 
 		/* Otherwise enter Recovery state */
-		tcp_enter_recovery(sk, (flag & FLAG_ECE));
+		tcp_enter_recovery(sk, ece_ack);
 		fast_rexmit = 1;
 	}
 
@@ -3091,7 +3081,7 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 			       u32 prior_snd_una,
-			       struct tcp_sacktag_state *sack)
+			       struct tcp_sacktag_state *sack, bool ece_ack)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u64 first_ackt, last_ackt;
@@ -3229,7 +3219,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 		}
 
 		if (tcp_is_reno(tp)) {
-			tcp_remove_reno_sacks(sk, pkts_acked);
+			tcp_remove_reno_sacks(sk, pkts_acked, ece_ack);
 
 			/* If any of the cumulatively ACKed segments was
 			 * retransmitted, non-SACK case cannot confirm that
@@ -3734,7 +3724,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		goto no_queue;
 
 	/* See if we can take anything off of the retransmit queue. */
-	flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state);
+	flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state,
+				    flag & FLAG_ECE);
 
 	tcp_rack_update_reo_wnd(sk, &rs);
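
A note on the ece_ack plumbing above: tcp_ack() computes flag & FLAG_ECE
once and hands it down through tcp_clean_rtx_queue() into the Reno SACK
helpers, so delivery accounting can tell whether the ACK echoed congestion.
A minimal sketch of that pattern follows; it is illustrative only, loosely
modeled on the upstream tcp_count_delivered() helper, and the flag value,
the counter struct, and count_delivered() are all invented for the demo.

#include <stdbool.h>
#include <stdio.h>

#define FLAG_ECE 0x40	/* invented bit for the demo */

/* Toy stand-in for the delivery counters on tcp_sock. */
static struct {
	unsigned int delivered;
	unsigned int delivered_ce;
} tp;

/* Count newly delivered packets; CE-marked deliveries are tracked
 * separately when the ACK carried an ECN echo (ece_ack). */
static void count_delivered(unsigned int pkts, bool ece_ack)
{
	tp.delivered += pkts;
	if (ece_ack)
		tp.delivered_ce += pkts;
}

int main(void)
{
	int flag = FLAG_ECE;		/* pretend this ACK had ECE set */
	bool ece_ack = flag & FLAG_ECE;	/* computed once, handed down */

	count_delivered(2, ece_ack);
	printf("delivered=%u delivered_ce=%u\n", tp.delivered, tp.delivered_ce);
	return 0;
}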