Commit a3e4abac authored by David S. Miller

Merge branch 'SO_RESERVE_MEM'



Wei Wang says:

====================
net: add new socket option SO_RESERVE_MEM

This patch series introduces a new socket option, SO_RESERVE_MEM.
It provides a mechanism for users to reserve a certain amount of
memory for a socket to use. When this option is set, the kernel
charges the user-specified amount of memory to memcg, as well as to
sk_forward_alloc. This memory is not reclaimable and stays available
in sk_forward_alloc for this socket.
With this option set, the networking stack spends fewer cycles doing
forward allocation and reclaim, which should lead to better system
performance, at the cost of a fixed amount of pre-allocated,
unreclaimable memory, even under memory pressure.
In a tcp_stream test with 10 flows running over a simulated 100ms RTT
link, the cycles spent in __sk_mem_raise_allocated() drop by ~0.02%.
Not a whole lot, since sk_mem_uncharge() already reclaims only 1MB
once sk_forward_alloc has more than 2MB of free space. But on a
system under constant memory pressure, the savings should be greater.
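
A minimal user-space sketch of setting the option (hedged: the
SO_RESERVE_MEM define may not be in libc headers yet, so the value
from this series is supplied manually, and reserve_sock_mem() is an
illustrative helper, not part of any API; the option also requires
memcg socket accounting, otherwise setsockopt() fails with
EOPNOTSUPP, and the pre-charge itself can fail with ENOMEM under
memory pressure):

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_RESERVE_MEM
#define SO_RESERVE_MEM 73	/* value added by this series */
#endif

/* Reserve ~bytes of forward-allocated memory for an existing socket.
 * The kernel rounds the request up to whole pages.
 */
int reserve_sock_mem(int fd, int bytes)
{
	if (setsockopt(fd, SOL_SOCKET, SO_RESERVE_MEM,
		       &bytes, sizeof(bytes)) < 0) {
		perror("setsockopt(SO_RESERVE_MEM)");
		return -1;
	}
	return 0;
}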

The first patch implements the socket option itself. The following 2
patches change the TCP stack to make use of this reserved memory when
under memory pressure. This makes TCP behavior under memory pressure
more flexible, and gives users a way to control how memory is
distributed among their sockets.
With a TCP connection over a simulated 100ms RTT link, the default
throughput under memory pressure is ~500Kbps. With SO_RESERVE_MEM set
to 100KB, throughput under memory pressure goes up to ~3.5Mbps.
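(Illustrative arithmetic, assuming 4KB pages: a 100KB request is
rounded up by sk_mem_pages() to 25 pages, i.e. exactly 100KB charged
to both sk_forward_alloc and sk_reserved_mem; a request of 100 bytes
would still reserve one full page.)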

Changes since v2:
- Added a description for the new field added to struct sock in patch 1
Changes since v1:
- Added performance stats to the cover letter and rebased
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4075a6a0 053f3684
include/net/sock.h  +40 −5
@@ -269,6 +269,7 @@ struct bpf_local_storage;
  *	@sk_omem_alloc: "o" is "option" or "other"
  *	@sk_wmem_queued: persistent queue size
  *	@sk_forward_alloc: space allocated forward
  *	@sk_reserved_mem: space reserved and non-reclaimable for the socket
  *	@sk_napi_id: id of the last napi context to receive data for sk
  *	@sk_ll_usec: usecs to busypoll when there is no data
  *	@sk_allocation: allocation mode
@@ -409,6 +410,7 @@ struct sock {
#define sk_rmem_alloc sk_backlog.rmem_alloc

	int			sk_forward_alloc;
	u32			sk_reserved_mem;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int		sk_ll_usec;
	/* ===== mostly read cache line ===== */
@@ -1511,20 +1513,49 @@ sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
		skb_pfmemalloc(skb);
}

static inline int sk_unused_reserved_mem(const struct sock *sk)
{
	int unused_mem;

	if (likely(!sk->sk_reserved_mem))
		return 0;

	unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
			atomic_read(&sk->sk_rmem_alloc);

	return unused_mem > 0 ? unused_mem : 0;
}

static inline void sk_mem_reclaim(struct sock *sk)
{
	int reclaimable;

	if (!sk_has_account(sk))
		return;
-	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
-		__sk_mem_reclaim(sk, sk->sk_forward_alloc);

	reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);

	if (reclaimable >= SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk, reclaimable);
}

static inline void sk_mem_reclaim_final(struct sock *sk)
{
	sk->sk_reserved_mem = 0;
	sk_mem_reclaim(sk);
}

static inline void sk_mem_reclaim_partial(struct sock *sk)
{
	int reclaimable;

	if (!sk_has_account(sk))
		return;
-	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
-		__sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);

	reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);

	if (reclaimable > SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk, reclaimable - 1);
}

static inline void sk_mem_charge(struct sock *sk, int size)
@@ -1536,9 +1567,12 @@ static inline void sk_mem_charge(struct sock *sk, int size)

static inline void sk_mem_uncharge(struct sock *sk, int size)
{
	int reclaimable;

	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc += size;
	reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);

	/* Avoid a possible overflow.
	 * TCP send queues can make this happen, if sk_mem_reclaim()
@@ -1547,7 +1581,7 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
	 * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
	 * no need to hold that much forward allocation anyway.
	 */
-	if (unlikely(sk->sk_forward_alloc >= 1 << 21))
	if (unlikely(reclaimable >= 1 << 21))
		__sk_mem_reclaim(sk, 1 << 20);
}

@@ -2344,6 +2378,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
		return;

	val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
	val = max_t(u32, val, sk_unused_reserved_mem(sk));

	WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
}
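
A minimal user-space model of the accounting introduced above (the
helper name, the SK_MEM_QUANTUM value, and the sample numbers are
illustrative, not kernel API; only the arithmetic mirrors
sk_unused_reserved_mem() and the sk_mem_reclaim() threshold):

#include <stdio.h>

#define SK_MEM_QUANTUM 4096	/* assume one 4KB page, as on most configs */

/* Reserved memory not currently queued on the send or receive side
 * is "unused" and must stay in forward_alloc.
 */
static int unused_reserved_mem(int reserved, int wmem_queued, int rmem_alloc)
{
	int unused = reserved - wmem_queued - rmem_alloc;

	return unused > 0 ? unused : 0;
}

int main(void)
{
	int reserved = 100 * 1024;	/* SO_RESERVE_MEM of 100KB */
	int forward_alloc = 110 * 1024;

	/* With 60KB queued for transmit and 20KB in the receive queue,
	 * 20KB of the reservation is still unused, so only
	 * forward_alloc minus those 20KB is eligible for reclaim.
	 */
	int unused = unused_reserved_mem(reserved, 60 * 1024, 20 * 1024);
	int reclaimable = forward_alloc - unused;

	printf("unused=%d reclaimable=%d reclaim=%s\n",
	       unused, reclaimable,
	       reclaimable >= SK_MEM_QUANTUM ? "yes" : "no");
	return 0;
}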
include/net/tcp.h  +11 −0
@@ -1421,6 +1421,17 @@ static inline int tcp_full_space(const struct sock *sk)
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}

static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
{
	int unused_mem = sk_unused_reserved_mem(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
	if (unused_mem)
		tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
					 tcp_win_from_space(sk, unused_mem));
}

void tcp_cleanup_rbuf(struct sock *sk, int copied);

/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
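
For instance, under memory pressure a socket with an advmss of 1460
would normally see rcv_ssthresh clamped to 4 * 1460 = 5840 bytes;
with 64KB of unused reserved memory and the default tcp_adv_win_scale
of 1, tcp_win_from_space(sk, 65536) keeps the clamp at 32KB instead,
so the advertised window does not collapse (numbers are illustrative
and assume default sysctls).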
include/uapi/asm-generic/socket.h  +2 −0
@@ -126,6 +126,8 @@

#define SO_BUF_LOCK		72

#define SO_RESERVE_MEM		73

#if !defined(__KERNEL__)

#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
net/core/sock.c  +69 −0
@@ -947,6 +947,53 @@ void sock_set_mark(struct sock *sk, u32 val)
}
EXPORT_SYMBOL(sock_set_mark);

static void sock_release_reserved_memory(struct sock *sk, int bytes)
{
	/* Round down bytes to multiple of pages */
	bytes &= ~(SK_MEM_QUANTUM - 1);

	WARN_ON(bytes > sk->sk_reserved_mem);
	sk->sk_reserved_mem -= bytes;
	sk_mem_reclaim(sk);
}

static int sock_reserve_memory(struct sock *sk, int bytes)
{
	long allocated;
	bool charged;
	int pages;

	if (!mem_cgroup_sockets_enabled || !sk->sk_memcg)
		return -EOPNOTSUPP;

	if (!bytes)
		return 0;

	pages = sk_mem_pages(bytes);

	/* pre-charge to memcg */
	charged = mem_cgroup_charge_skmem(sk->sk_memcg, pages,
					  GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!charged)
		return -ENOMEM;

	/* pre-charge to forward_alloc */
	allocated = sk_memory_allocated_add(sk, pages);
	/* If the system goes into memory pressure with this
	 * precharge, give up and return error.
	 */
	if (allocated > sk_prot_mem_limits(sk, 1)) {
		sk_memory_allocated_sub(sk, pages);
		mem_cgroup_uncharge_skmem(sk->sk_memcg, pages);
		return -ENOMEM;
	}
	sk->sk_forward_alloc += pages << SK_MEM_QUANTUM_SHIFT;

	sk->sk_reserved_mem += pages << SK_MEM_QUANTUM_SHIFT;

	return 0;
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
@@ -1367,6 +1414,23 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
					  ~SOCK_BUF_LOCK_MASK);
		break;

	case SO_RESERVE_MEM:
	{
		int delta;

		if (val < 0) {
			ret = -EINVAL;
			break;
		}

		delta = val - sk->sk_reserved_mem;
		if (delta < 0)
			sock_release_reserved_memory(sk, -delta);
		else
			ret = sock_reserve_memory(sk, delta);
		break;
	}

	default:
		ret = -ENOPROTOOPT;
		break;
@@ -1733,6 +1797,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
		v.val = sk->sk_userlocks & SOCK_BUF_LOCK_MASK;
		break;

	case SO_RESERVE_MEM:
		v.val = sk->sk_reserved_mem;
		break;

	default:
		/* We implement the SO_SNDLOWAT etc to not be settable
		 * (1003.1g 7).
@@ -2045,6 +2113,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
	newsk->sk_dst_pending_confirm = 0;
	newsk->sk_wmem_queued	= 0;
	newsk->sk_forward_alloc = 0;
	newsk->sk_reserved_mem  = 0;
	atomic_set(&newsk->sk_drops, 0);
	newsk->sk_send_head	= NULL;
	newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
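
One consequence of the page rounding above: sock_reserve_memory()
rounds a grow request up to whole pages, while
sock_release_reserved_memory() rounds a shrink down, so getsockopt()
can report more than was asked for and a sub-page shrink is a no-op.
A hedged sketch (show_reserve_rounding() is an illustrative helper;
assumes 4KB pages, memcg socket accounting, and a connected fd):

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_RESERVE_MEM
#define SO_RESERVE_MEM 73	/* value added by this series */
#endif

void show_reserve_rounding(int fd)
{
	int val = 100;			/* ask for 100 bytes */
	socklen_t len = sizeof(val);

	setsockopt(fd, SOL_SOCKET, SO_RESERVE_MEM, &val, sizeof(val));
	getsockopt(fd, SOL_SOCKET, SO_RESERVE_MEM, &val, &len);
	printf("after 100-byte request: %d\n", val);	/* 4096 */

	val = 2048;			/* shrink below a page boundary */
	setsockopt(fd, SOL_SOCKET, SO_RESERVE_MEM, &val, sizeof(val));
	getsockopt(fd, SOL_SOCKET, SO_RESERVE_MEM, &val, &len);
	printf("after shrink to 2048:   %d\n", val);	/* still 4096 */
}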
net/core/stream.c  +1 −1
@@ -202,7 +202,7 @@ void sk_stream_kill_queues(struct sock *sk)
	WARN_ON(!skb_queue_empty(&sk->sk_write_queue));

	/* Account for returned memory. */
-	sk_mem_reclaim(sk);
	sk_mem_reclaim_final(sk);

	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);