Commit ebc1e08f authored by Geliang Tang, committed by Jakub Kicinski
Browse files

mptcp: drop last_snd and MPTCP_RESET_SCHEDULER



Since the burst check conditions have moved out of the function
mptcp_subflow_get_send(), all uses of msk->last_snd are now useless.
This patch drops them, as well as the macro MPTCP_RESET_SCHEDULER.

Reviewed-by: Mat Martineau <martineau@kernel.org>
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
Signed-off-by: Mat Martineau <martineau@kernel.org>
Link: https://lore.kernel.org/r/20230821-upstream-net-next-20230818-v1-2-0c860fb256a8@kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent c5b4297d
Loading
Loading
Loading
Loading
+1 −8
Original line number Diff line number Diff line
@@ -299,15 +299,8 @@ void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup)

	pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup);
	msk = mptcp_sk(sk);
	if (subflow->backup != bkup) {
	if (subflow->backup != bkup)
		subflow->backup = bkup;
		mptcp_data_lock(sk);
		if (!sock_owned_by_user(sk))
			msk->last_snd = NULL;
		else
			__set_bit(MPTCP_RESET_SCHEDULER,  &msk->cb_flags);
		mptcp_data_unlock(sk);
	}

	mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC);
}
+0 −3
Original line number Diff line number Diff line
@@ -472,9 +472,6 @@ static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_con

	slow = lock_sock_fast(ssk);
	if (prio) {
		if (subflow->backup != backup)
			msk->last_snd = NULL;

		subflow->send_mp_prio = 1;
		subflow->backup = backup;
		subflow->request_bkup = backup;
+1 −10
Original line number Diff line number Diff line
@@ -1438,16 +1438,13 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)

	burst = min_t(int, MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt);
	wmem = READ_ONCE(ssk->sk_wmem_queued);
	if (!burst) {
		msk->last_snd = NULL;
	if (!burst)
		return ssk;
	}

	subflow = mptcp_subflow_ctx(ssk);
	subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem +
					   READ_ONCE(ssk->sk_pacing_rate) * burst,
					   burst + wmem);
	msk->last_snd = ssk;
	msk->snd_burst = burst;
	return ssk;
}
@@ -2379,9 +2376,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
		WRITE_ONCE(msk->first, NULL);

out:
	if (ssk == msk->last_snd)
		msk->last_snd = NULL;

	if (need_push)
		__mptcp_push_pending(sk, 0);
}
@@ -3046,7 +3040,6 @@ static int mptcp_disconnect(struct sock *sk, int flags)
	 * subflow
	 */
	mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE);
	msk->last_snd = NULL;
	WRITE_ONCE(msk->flags, 0);
	msk->cb_flags = 0;
	msk->push_pending = 0;
@@ -3316,8 +3309,6 @@ static void mptcp_release_cb(struct sock *sk)
			__mptcp_set_connected(sk);
		if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
			__mptcp_error_report(sk);
		if (__test_and_clear_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags))
			msk->last_snd = NULL;
	}

	__mptcp_update_rmem(sk);
+0 −2
Original line number Diff line number Diff line
@@ -123,7 +123,6 @@
#define MPTCP_RETRANSMIT	4
#define MPTCP_FLUSH_JOIN_LIST	5
#define MPTCP_CONNECTED		6
#define MPTCP_RESET_SCHEDULER	7

struct mptcp_skb_cb {
	u64 map_seq;
@@ -269,7 +268,6 @@ struct mptcp_sock {
	u64		rcv_data_fin_seq;
	u64		bytes_retrans;
	int		rmem_fwd_alloc;
	struct sock	*last_snd;
	int		snd_burst;
	int		old_wspace;
	u64		recovery_snd_nxt;	/* in recovery mode accept up to this seq;