Commit fac08010 authored by Paolo Abeni, committed by Wen Zhiwei
Browse files

mptcp: don't always assume copied data in mptcp_cleanup_rbuf()

stable inclusion
from stable-v6.6.70
commit f61e663d78ff9def3eabad63ae2d64a80513cb82
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IBOHV1

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=f61e663d78ff9def3eabad63ae2d64a80513cb82



--------------------------------

commit 551844f26da2a9f76c0a698baaffa631d1178645 upstream.

Under some corner cases the MPTCP protocol can end-up invoking
mptcp_cleanup_rbuf() when no data has been copied, but such helper
assumes the opposite condition.

Explicitly drop such assumption and performs the costly call only
when strictly needed - before releasing the msk socket lock.

Fixes: fd897679 ("mptcp: be careful on MPTCP-level ack.")
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Mat Martineau <martineau@kernel.org>
Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Link: https://patch.msgid.link/20241230-net-mptcp-rbuf-fixes-v1-2-8608af434ceb@kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Wen Zhiwei <wenzhiwei@kylinos.cn>
parent b9fe5f60
Showing 1 changed file: net/mptcp/protocol.c (+9 −9)
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -527,13 +527,13 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
 		mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
 }
 
-static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
+static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
 {
 	bool slow;
 
 	slow = lock_sock_fast(ssk);
 	if (tcp_can_send_ack(ssk))
-		tcp_cleanup_rbuf(ssk, 1);
+		tcp_cleanup_rbuf(ssk, copied);
 	unlock_sock_fast(ssk, slow);
 }

@@ -550,7 +550,7 @@ static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
 			      (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
 }
 
-static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
+static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
 {
 	int old_space = READ_ONCE(msk->old_wspace);
 	struct mptcp_subflow_context *subflow;
@@ -558,14 +558,14 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
 	int space =  __mptcp_space(sk);
 	bool cleanup, rx_empty;
 
-	cleanup = (space > 0) && (space >= (old_space << 1));
-	rx_empty = !__mptcp_rmem(sk);
+	cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
+	rx_empty = !__mptcp_rmem(sk) && copied;
 
 	mptcp_for_each_subflow(msk, subflow) {
 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 
 		if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
-			mptcp_subflow_cleanup_rbuf(ssk);
+			mptcp_subflow_cleanup_rbuf(ssk, copied);
 	}
 }

@@ -2215,9 +2215,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 
 		copied += bytes_read;
 
-		/* be sure to advertise window change */
-		mptcp_cleanup_rbuf(msk);
-
 		if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
 			continue;
@@ -2266,6 +2263,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 		}
 
 		pr_debug("block timeout %ld\n", timeo);
+		mptcp_cleanup_rbuf(msk, copied);
 		err = sk_wait_data(sk, &timeo, NULL);
 		if (err < 0) {
 			err = copied ? : err;
@@ -2273,6 +2271,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 		}
 	}
 
+	mptcp_cleanup_rbuf(msk, copied);
+
 out_err:
 	if (cmsg_flags && copied >= 0) {
 		if (cmsg_flags & MPTCP_CMSG_TS)