Commit d5fbeff1 authored by Paolo Abeni, committed by David S. Miller
Browse files

mptcp: move __mptcp_error_report in protocol.c



This will simplify the next patch ("mptcp: process pending subflow error
on close").

No functional change intended.

Cc: stable@vger.kernel.org # v5.12+
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Mat Martineau <martineau@kernel.org>
Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6bec0411
Loading
Loading
Loading
Loading
+36 −0
Original line number Diff line number Diff line
@@ -770,6 +770,42 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
	return moved;
}

/* Propagate a pending subflow-level error to the parent MPTCP socket.
 *
 * Scans all subflows of @sk (the msk) and, on the first subflow carrying a
 * pending error, conditionally transitions the msk state, records the error
 * in sk->sk_err and wakes error-interested waiters via sk_error_report().
 * Only the first erroring subflow is reported (the loop breaks after it).
 *
 * NOTE(review): presumably called with the msk socket lock held — confirm
 * against the callers in subflow.c / protocol.c.
 */
void __mptcp_error_report(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		/* NOTE(review): sock_error() is expected to return the negated
		 * errno and atomically clear ssk->sk_err (xchg) — confirm
		 * against include/net/sock.h.
		 */
		int err = sock_error(ssk);
		int ssk_state;

		/* No pending error on this subflow: keep scanning. */
		if (!err)
			continue;

		/* only propagate errors on fallen-back sockets or
		 * on MPC connect
		 */
		if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
			continue;

		/* We need to propagate only transition to CLOSE state.
		 * Orphaned socket will see such state change via
		 * subflow_sched_work_if_closed() and that path will properly
		 * destroy the msk as needed.
		 */
		ssk_state = inet_sk_state_load(ssk);
		if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
			inet_sk_state_store(sk, ssk_state);
		/* Negate back to a positive errno for sk_err; lockless
		 * readers pair with READ_ONCE() accesses of sk->sk_err.
		 */
		WRITE_ONCE(sk->sk_err, -err);

		/* This barrier is coupled with smp_rmb() in mptcp_poll() */
		smp_wmb();
		sk_error_report(sk);
		break;
	}
}

/* In most cases we will be able to lock the mptcp socket.  If its already
 * owned, we need to defer to the work queue to avoid ABBA deadlock.
 */
+0 −36
Original line number Diff line number Diff line
@@ -1362,42 +1362,6 @@ void mptcp_space(const struct sock *ssk, int *space, int *full_space)
	*full_space = mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}

/* Propagate a pending subflow-level error to the parent MPTCP socket.
 *
 * Scans all subflows of @sk (the msk) and, on the first subflow carrying a
 * pending error, conditionally transitions the msk state, records the error
 * in sk->sk_err and wakes error-interested waiters via sk_error_report().
 * Only the first erroring subflow is reported (the loop breaks after it).
 *
 * NOTE(review): presumably called with the msk socket lock held — confirm
 * against the callers in subflow.c / protocol.c.
 */
void __mptcp_error_report(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		/* NOTE(review): sock_error() is expected to return the negated
		 * errno and atomically clear ssk->sk_err (xchg) — confirm
		 * against include/net/sock.h.
		 */
		int err = sock_error(ssk);
		int ssk_state;

		/* No pending error on this subflow: keep scanning. */
		if (!err)
			continue;

		/* only propagate errors on fallen-back sockets or
		 * on MPC connect
		 */
		if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
			continue;

		/* We need to propagate only transition to CLOSE state.
		 * Orphaned socket will see such state change via
		 * subflow_sched_work_if_closed() and that path will properly
		 * destroy the msk as needed.
		 */
		ssk_state = inet_sk_state_load(ssk);
		if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
			inet_sk_state_store(sk, ssk_state);
		/* Negate back to a positive errno for sk_err; lockless
		 * readers pair with READ_ONCE() accesses of sk->sk_err.
		 */
		WRITE_ONCE(sk->sk_err, -err);

		/* This barrier is coupled with smp_rmb() in mptcp_poll() */
		smp_wmb();
		sk_error_report(sk);
		break;
	}
}

static void subflow_error_report(struct sock *ssk)
{
	struct sock *sk = mptcp_subflow_ctx(ssk)->conn;