Commit 762405e3 authored by David S. Miller
Browse files

Merge branch 'mptcp-next'



Mat Martineau says:

====================
mptcp: Protocol in-use tracking and code cleanup

Here's a collection of commits from the MPTCP tree:

Patches 1-4 and 6 contain miscellaneous code cleanup for more consistent
use of helper functions, existing local variables, and better naming.

Patches 5, 7, and 9 add sock_prot_inuse tracking for MPTCP and an
associated self test.

Patch 8 modifies the mptcp_connect self test tool to exit on SIGUSR1
when in "slow mode".
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents fb59bf28 e04a30f7
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -1594,8 +1594,7 @@ void mptcp_write_options(struct tcphdr *th, __be32 *ptr, struct tcp_sock *tp,
				      TCPOLEN_MPTCP_PRIO,
				      opts->backup, TCPOPT_NOP);

		MPTCP_INC_STATS(sock_net((const struct sock *)tp),
				MPTCP_MIB_MPPRIOTX);
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPPRIOTX);
	}

mp_capable_done:
+2 −3
Original line number Diff line number Diff line
@@ -1143,7 +1143,7 @@ void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ss
			if (!tcp_rtx_and_write_queues_empty(ssk)) {
				subflow->stale = 1;
				__mptcp_retransmit_pending_data(sk);
				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_SUBFLOWSTALE);
				MPTCP_INC_STATS(net, MPTCP_MIB_SUBFLOWSTALE);
			}
			unlock_sock_fast(ssk, slow);

@@ -1903,8 +1903,7 @@ static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info)
	}

	if (token)
		return mptcp_userspace_pm_set_flags(sock_net(skb->sk),
						    token, &addr, &remote, bkup);
		return mptcp_userspace_pm_set_flags(net, token, &addr, &remote, bkup);

	spin_lock_bh(&pernet->lock);
	entry = __lookup_addr(pernet, &addr.addr, lookup_by_id);
+24 −14
Original line number Diff line number Diff line
@@ -923,9 +923,8 @@ static void mptcp_check_for_eof(struct mptcp_sock *msk)
static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;

	sock_owned_by_me(sk);
	msk_owned_by_me(msk);

	mptcp_for_each_subflow(msk, subflow) {
		if (READ_ONCE(subflow->data_avail))
@@ -1408,7 +1407,7 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
	u64 linger_time;
	long tout = 0;

	sock_owned_by_me(sk);
	msk_owned_by_me(msk);

	if (__mptcp_check_fallback(msk)) {
		if (!msk->first)
@@ -1890,7 +1889,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
	u32 time, advmss = 1;
	u64 rtt_us, mstamp;

	sock_owned_by_me(sk);
	msk_owned_by_me(msk);

	if (copied <= 0)
		return;
@@ -2217,7 +2216,7 @@ static struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
	struct mptcp_subflow_context *subflow;
	int min_stale_count = INT_MAX;

	sock_owned_by_me((const struct sock *)msk);
	msk_owned_by_me(msk);

	if (__mptcp_check_fallback(msk))
		return NULL;
@@ -2724,8 +2723,8 @@ static int mptcp_init_sock(struct sock *sk)
	mptcp_ca_reset(sk);

	sk_sockets_allocated_inc(sk);
	sk->sk_rcvbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
	sk->sk_sndbuf = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
	sk->sk_rcvbuf = READ_ONCE(net->ipv4.sysctl_tcp_rmem[1]);
	sk->sk_sndbuf = READ_ONCE(net->ipv4.sysctl_tcp_wmem[1]);

	return 0;
}
@@ -2892,6 +2891,12 @@ static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
	return EPOLLIN | EPOLLRDNORM;
}

/* If @sk is currently a listener, drop the per-protocol "in use" count
 * that was taken when it entered TCP_LISTEN; no-op for any other state.
 */
static void mptcp_listen_inuse_dec(struct sock *sk)
{
	if (inet_sk_state_load(sk) != TCP_LISTEN)
		return;

	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
}

bool __mptcp_close(struct sock *sk, long timeout)
{
	struct mptcp_subflow_context *subflow;
@@ -2901,6 +2906,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
	sk->sk_shutdown = SHUTDOWN_MASK;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
		mptcp_listen_inuse_dec(sk);
		inet_sk_state_store(sk, TCP_CLOSE);
		goto cleanup;
	}
@@ -3001,6 +3007,7 @@ static int mptcp_disconnect(struct sock *sk, int flags)
	if (msk->fastopening)
		return 0;

	mptcp_listen_inuse_dec(sk);
	inet_sk_state_store(sk, TCP_CLOSE);

	mptcp_stop_timer(sk);
@@ -3639,12 +3646,13 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
static int mptcp_listen(struct socket *sock, int backlog)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	lock_sock(sk);
	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock) {
		err = -EINVAL;
@@ -3652,18 +3660,20 @@ static int mptcp_listen(struct socket *sock, int backlog)
	}

	mptcp_token_destroy(msk);
	inet_sk_state_store(sock->sk, TCP_LISTEN);
	sock_set_flag(sock->sk, SOCK_RCU_FREE);
	inet_sk_state_store(sk, TCP_LISTEN);
	sock_set_flag(sk, SOCK_RCU_FREE);

	err = ssock->ops->listen(ssock, backlog);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);
	inet_sk_state_store(sk, inet_sk_state_load(ssock->sk));
	if (!err) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		mptcp_copy_inaddrs(sk, ssock->sk);
	}

	mptcp_event_pm_listener(ssock->sk, MPTCP_EVENT_LISTENER_CREATED);

unlock:
	release_sock(sock->sk);
	release_sock(sk);
	return err;
}

+1 −1
Original line number Diff line number Diff line
@@ -754,7 +754,7 @@ static inline void mptcp_token_init_request(struct request_sock *req)

int mptcp_token_new_request(struct request_sock *req);
void mptcp_token_destroy_request(struct request_sock *req);
int mptcp_token_new_connect(struct sock *sk);
int mptcp_token_new_connect(struct sock *ssk);
void mptcp_token_accept(struct mptcp_subflow_request_sock *r,
			struct mptcp_sock *msk);
bool mptcp_token_exists(u32 token);
+1 −1
Original line number Diff line number Diff line
@@ -18,7 +18,7 @@

static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk)
{
	sock_owned_by_me((const struct sock *)msk);
	msk_owned_by_me(msk);

	if (likely(!__mptcp_check_fallback(msk)))
		return NULL;
Loading