Commit d51991e2 authored by Paolo Abeni, committed by Jakub Kicinski
Browse files

mptcp: fix shutdown vs fallback race



If the MPTCP socket shutdown happens before a fallback
to TCP, and all the pending data has already been spooled,
we never close the TCP connection.

Address the issue by explicitly checking for the critical
condition at fallback time.

Fixes: 1e39e5a3 ("mptcp: infinite mapping sending")
Fixes: 0348c690 ("mptcp: add the fallback check")
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 76a13b31
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -967,7 +967,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
			goto reset;
		subflow->mp_capable = 0;
		pr_fallback(msk);
		__mptcp_do_fallback(msk);
		mptcp_do_fallback(ssk);
		return false;
	}

+1 −1
Original line number Diff line number Diff line
@@ -1245,7 +1245,7 @@ static void mptcp_update_infinite_map(struct mptcp_sock *msk,
	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPTX);
	mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
	pr_fallback(msk);
	__mptcp_do_fallback(msk);
	mptcp_do_fallback(ssk);
}

static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
+16 −3
Original line number Diff line number Diff line
@@ -927,12 +927,25 @@ static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
}

static inline void mptcp_do_fallback(struct sock *sk)
static inline void mptcp_do_fallback(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;
	struct mptcp_sock *msk;

	msk = mptcp_sk(sk);
	__mptcp_do_fallback(msk);
	if (READ_ONCE(msk->snd_data_fin_enable) && !(ssk->sk_shutdown & SEND_SHUTDOWN)) {
		gfp_t saved_allocation = ssk->sk_allocation;

		/* we are in a atomic (BH) scope, override ssk default for data
		 * fin allocation
		 */
		ssk->sk_allocation = GFP_ATOMIC;
		ssk->sk_shutdown |= SEND_SHUTDOWN;
		tcp_shutdown(ssk, SEND_SHUTDOWN);
		ssk->sk_allocation = saved_allocation;
	}
}

#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
+1 −1
Original line number Diff line number Diff line
@@ -1279,7 +1279,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
			return false;
		}

		__mptcp_do_fallback(msk);
		mptcp_do_fallback(ssk);
	}

	skb = skb_peek(&ssk->sk_receive_queue);