Commit b97af722 authored by David S. Miller
Browse files

Merge branch 'UDP-sock_wfree-opts'

Pavel Begunkov says:

====================
UDP sock_wfree optimisations

The series is not UDP specific, but that's the main beneficiary. 2/3 saves one
atomic in sock_wfree() and on top 3/3 removes an extra barrier.
Tested with UDP over dummy netdev, 2038491 -> 2099071 req/s (or around +3%).

note: in regards to 1/3, there is a "Should agree with poll..." comment
that I don't completely get, and there is no git history to explain it.
Though I can't see how it could rely on having the second check without
racing with tasks woken by wake_up*().

The series was split from a larger patchset, see
https://lore.kernel.org/netdev/cover.1648981570.git.asml.silence@gmail.com/


====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0ed99ecc 0a8afd9f
Loading
Loading
Loading
Loading
+40 −3
Original line number Diff line number Diff line
@@ -146,6 +146,9 @@
static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

static void sock_def_write_space_wfree(struct sock *sk);
static void sock_def_write_space(struct sock *sk);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
@@ -2324,8 +2327,20 @@ void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;
	bool free;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		if (sock_flag(sk, SOCK_RCU_FREE) &&
		    sk->sk_write_space == sock_def_write_space) {
			rcu_read_lock();
			free = refcount_sub_and_test(len, &sk->sk_wmem_alloc);
			sock_def_write_space_wfree(sk);
			rcu_read_unlock();
			if (unlikely(free))
				__sk_free(sk);
			return;
		}

		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
@@ -3191,20 +3206,42 @@ static void sock_def_write_space(struct sock *sk)
	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
	if (sock_writeable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

/* An optimised version of sock_def_write_space(), should only be called
 * for SOCK_RCU_FREE sockets under RCU read section and after putting
 * ->sk_wmem_alloc.
 *
 * Unlike sock_def_write_space(), it neither takes rcu_read_lock() itself
 * (the caller already holds it) nor uses skwq_has_sleeper(): the full
 * barrier normally supplied by skwq_has_sleeper() is instead provided by
 * the refcount_sub_and_test() the caller just performed, see below.
 */
static void sock_def_write_space_wfree(struct sock *sk)
{
	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if (sock_writeable(sk)) {
		struct socket_wq *wq = rcu_dereference(sk->sk_wq);

		/* rely on refcount_sub from sock_wfree()
		 *
		 * The sub of skb->truesize from ->sk_wmem_alloc was an
		 * atomic RMW op; upgrading it with smp_mb__after_atomic()
		 * orders that write before the waitqueue_active() read,
		 * pairing with the barrier on the sleeper side so a waiter
		 * cannot be missed (this replaces skwq_has_sleeper()).
		 */
		smp_mb__after_atomic();
		if (wq && waitqueue_active(&wq->wait))
			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
						EPOLLWRNORM | EPOLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
}

/* Default ->sk_destruct callback: intentionally a no-op for sockets that
 * have no per-protocol state to release at destruction time.
 */
static void sock_def_destruct(struct sock *sk)
{
}