Commit 13463f73 authored by David S. Miller

Merge branch 'mptcp-mem-scheduling'



Mat Martineau says:

====================
mptcp: Updates for mem scheduling and SK_RECLAIM

In the "net: reduce tcp_memory_allocated inflation" series (merge commit
e10b02ee), Eric Dumazet noted that "Removal of SK_RECLAIM_CHUNK and
SK_RECLAIM_THRESHOLD is left to MPTCP maintainers as a follow up."

Patches 1-3 align MPTCP with the above TCP changes to forward memory
allocation, reclaim, and memory scheduling.

Patch 4 removes the SK_RECLAIM_* macros as Eric requested.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 456bfd9d e918c137
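
For readers skimming the hunks below, here is a minimal stand-alone sketch of the reclaim-policy change the cover letter describes: the old scheme waited until 2 MiB of reclaimable forward allocation piled up and then gave back a fixed 1 MiB chunk, while the new scheme gives back everything above the reserved amount as soon as at least one page is reclaimable. The toy_sock type, the helper functions, and the 4 KiB PAGE_SIZE below are illustrative assumptions, not kernel APIs; only the constants and the comparisons mirror the diff.

/* Illustrative userspace sketch, not kernel code. */
#include <stdio.h>

#define PAGE_SIZE		4096		/* assumed 4 KiB pages */
#define SK_RECLAIM_THRESHOLD	(1 << 21)	/* 2 MiB, removed by patch 4 */
#define SK_RECLAIM_CHUNK	(1 << 20)	/* 1 MiB, removed by patch 4 */

struct toy_sock {			/* stand-in for the msk state */
	int rmem_fwd_alloc;		/* forward-allocated rx memory, bytes */
	int reserved_mem;		/* stand-in for sk_unused_reserved_mem() */
};

/* Old behaviour: do nothing until 2 MiB accumulates, then return 1 MiB. */
static int old_reclaim(const struct toy_sock *sk)
{
	int reclaimable = sk->rmem_fwd_alloc - sk->reserved_mem;

	if (reclaimable >= SK_RECLAIM_THRESHOLD)
		return SK_RECLAIM_CHUNK;
	return 0;
}

/* New behaviour: once at least a page is reclaimable, return all of it. */
static int new_reclaim(const struct toy_sock *sk)
{
	int reclaimable = sk->rmem_fwd_alloc - sk->reserved_mem;

	if (reclaimable >= PAGE_SIZE)
		return reclaimable;
	return 0;
}

int main(void)
{
	struct toy_sock sk = { .rmem_fwd_alloc = 3 << 20, .reserved_mem = 0 };

	printf("old scheme reclaims %d bytes, new scheme reclaims %d bytes\n",
	       old_reclaim(&sk), new_reclaim(&sk));
	return 0;
}
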
+0 −5
@@ -1619,11 +1619,6 @@ static inline void sk_mem_charge(struct sock *sk, int size)
 	sk->sk_forward_alloc -= size;
 }
 
-/* the following macros control memory reclaiming in mptcp_rmem_uncharge()
- */
-#define SK_RECLAIM_THRESHOLD	(1 << 21)
-#define SK_RECLAIM_CHUNK	(1 << 20)
-
 static inline void sk_mem_uncharge(struct sock *sk, int size)
 {
 	if (!sk_has_account(sk))
+7 −42
@@ -181,8 +181,8 @@ static void mptcp_rmem_uncharge(struct sock *sk, int size)
 	reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk);
 
 	/* see sk_mem_uncharge() for the rationale behind the following schema */
-	if (unlikely(reclaimable >= SK_RECLAIM_THRESHOLD))
-		__mptcp_rmem_reclaim(sk, SK_RECLAIM_CHUNK);
+	if (unlikely(reclaimable >= PAGE_SIZE))
+		__mptcp_rmem_reclaim(sk, reclaimable);
 }
 
 static void mptcp_rfree(struct sk_buff *skb)
@@ -323,20 +323,16 @@ static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	int amt, amount;
 
-	if (size < msk->rmem_fwd_alloc)
+	if (size <= msk->rmem_fwd_alloc)
 		return true;
 
+	size -= msk->rmem_fwd_alloc;
 	amt = sk_mem_pages(size);
 	amount = amt << PAGE_SHIFT;
-	msk->rmem_fwd_alloc += amount;
-	if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV)) {
-		if (ssk->sk_forward_alloc < amount) {
-			msk->rmem_fwd_alloc -= amount;
-			return false;
-		}
-
-		ssk->sk_forward_alloc -= amount;
-	}
+	if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV))
+		return false;
+
+	msk->rmem_fwd_alloc += amount;
 	return true;
 }
 
@@ -966,25 +962,6 @@ static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
 		df->data_seq + df->data_len == msk->write_seq;
 }
 
-static void __mptcp_mem_reclaim_partial(struct sock *sk)
-{
-	int reclaimable = mptcp_sk(sk)->rmem_fwd_alloc - sk_unused_reserved_mem(sk);
-
-	lockdep_assert_held_once(&sk->sk_lock.slock);
-
-	if (reclaimable > (int)PAGE_SIZE)
-		__mptcp_rmem_reclaim(sk, reclaimable - 1);
-
-	sk_mem_reclaim(sk);
-}
-
-static void mptcp_mem_reclaim_partial(struct sock *sk)
-{
-	mptcp_data_lock(sk);
-	__mptcp_mem_reclaim_partial(sk);
-	mptcp_data_unlock(sk);
-}
-
 static void dfrag_uncharge(struct sock *sk, int len)
 {
 	sk_mem_uncharge(sk, len);
@@ -1004,7 +981,6 @@ static void __mptcp_clean_una(struct sock *sk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	struct mptcp_data_frag *dtmp, *dfrag;
-	bool cleaned = false;
 	u64 snd_una;
 
 	/* on fallback we just need to ignore snd_una, as this is really
@@ -1027,7 +1003,6 @@ static void __mptcp_clean_una(struct sock *sk)
 		}
 
 		dfrag_clear(sk, dfrag);
-		cleaned = true;
 	}
 
 	dfrag = mptcp_rtx_head(sk);
@@ -1049,7 +1024,6 @@ static void __mptcp_clean_una(struct sock *sk)
 		dfrag->already_sent -= delta;
 
 		dfrag_uncharge(sk, delta);
-		cleaned = true;
 	}
 
 	/* all retransmitted data acked, recovery completed */
@@ -1057,9 +1031,6 @@ static void __mptcp_clean_una(struct sock *sk)
 		msk->recovery = false;
 
 out:
-	if (cleaned && tcp_under_memory_pressure(sk))
-		__mptcp_mem_reclaim_partial(sk);
-
 	if (snd_una == READ_ONCE(msk->snd_nxt) &&
 	    snd_una == READ_ONCE(msk->write_seq)) {
 		if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
@@ -1211,12 +1182,6 @@ static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
 {
 	gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation;
 
-	if (unlikely(tcp_under_memory_pressure(sk))) {
-		if (data_lock_held)
-			__mptcp_mem_reclaim_partial(sk);
-		else
-			mptcp_mem_reclaim_partial(sk);
-	}
 	return __mptcp_alloc_tx_skb(sk, ssk, gfp);
 }
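
The mptcp_rmem_schedule() hunk above also drops the fallback that borrowed forward allocation from the subflow socket (ssk) when the global memory charge failed; after the series, the msk accounts the pages only once __sk_mem_raise_allocated() succeeds. Below is a stand-alone sketch of that control flow, assuming toy types and stub helpers in place of the kernel's socket structures and memory-accounting calls.

/* Illustrative userspace sketch, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define PAGE_SIZE	(1 << PAGE_SHIFT)

struct toy_msk {
	int rmem_fwd_alloc;	/* bytes already forward-allocated to the msk */
};

/* Stand-in for sk_mem_pages(): round a byte count up to whole pages. */
static int toy_mem_pages(int bytes)
{
	return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

/* Stand-in for __sk_mem_raise_allocated(): pretend the global memory
 * accounting accepted the charge.
 */
static bool toy_raise_allocated(int pages)
{
	(void)pages;
	return true;
}

/* Mirrors the post-patch shape of mptcp_rmem_schedule(): no borrowing from
 * the subflow, charge the msk only after the global charge succeeds.
 */
static bool toy_rmem_schedule(struct toy_msk *msk, int size)
{
	int amt, amount;

	if (size <= msk->rmem_fwd_alloc)
		return true;

	size -= msk->rmem_fwd_alloc;
	amt = toy_mem_pages(size);
	amount = amt << PAGE_SHIFT;
	if (!toy_raise_allocated(amt))
		return false;

	msk->rmem_fwd_alloc += amount;
	return true;
}

int main(void)
{
	struct toy_msk msk = { .rmem_fwd_alloc = 0 };

	printf("schedule 6000 bytes: %s, fwd_alloc now %d\n",
	       toy_rmem_schedule(&msk, 6000) ? "ok" : "failed",
	       msk.rmem_fwd_alloc);
	return 0;
}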