Unverified Commit 6d0b2553 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!3421 backport some network patches

Merge Pull Request from: @ci-robot 
 
PR sync from: Liu Jian <liujian56@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/HE5RYQDRIYT2TCF24TRIFCIWWP2RROY7/ 
backport some network patches

Brian Vazquez (1):
  net: use indirect calls helpers for sk_exit_memory_pressure()

Eric Dumazet (5):
  net: cache align tcp_memory_allocated, tcp_sockets_allocated
  tcp: small optimization in tcp recvmsg()
  tcp: add RETPOLINE mitigation to sk_backlog_rcv
  tcp: avoid indirect calls to sock_rfree
  tcp: check local var (timeo) before socket fields in one test


-- 
2.34.1
 
https://gitee.com/openeuler/kernel/issues/I65HYE 
 
Link: https://gitee.com/openeuler/kernel/pulls/3421

 

Reviewed-by: default avatarYue Haibing <yuehaibing@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
Acked-by: default avatarXie XiuQi <xiexiuqi@huawei.com>
parents 567ef11f fc097735
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -60,4 +60,10 @@
#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__)
#endif

#if IS_ENABLED(CONFIG_INET)
/*
 * INDIRECT_CALL_INET_1() - indirect-call helper with a single likely
 * INET target @f1.  With CONFIG_INET enabled it expands to
 * INDIRECT_CALL_1(), which compares the function pointer @f against @f1
 * and calls @f1 directly on a match (cheaper than an indirect call when
 * retpolines are in use); without CONFIG_INET it degenerates to a plain
 * indirect call through @f.
 */
#define INDIRECT_CALL_INET_1(f, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_INET_1(f, f1, ...) f(__VA_ARGS__)
#endif

#endif
+7 −1
Original line number Diff line number Diff line
@@ -1051,12 +1051,18 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);

INDIRECT_CALLABLE_DECLARE(int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb));

/*
 * sk_backlog_rcv() - run the protocol backlog receive handler on @skb.
 *
 * pfmemalloc skbs take the __sk_backlog_rcv() slow path so the memalloc
 * accounting there applies.  Otherwise dispatch via INDIRECT_CALL_INET()
 * so the common tcp_v4_do_rcv()/tcp_v6_do_rcv() targets are called
 * directly, avoiding a retpoline-mitigated indirect call.
 *
 * Returns the handler's verdict (protocol-specific int).
 */
static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		return __sk_backlog_rcv(sk, skb);

	/* Pre-patch plain indirect call removed: it made the
	 * INDIRECT_CALL_INET() return below unreachable dead code.
	 */
	return INDIRECT_CALL_INET(sk->sk_backlog_rcv,
				  tcp_v6_do_rcv,
				  tcp_v4_do_rcv,
				  sk, skb);
}

static inline void sk_incoming_cpu_update(struct sock *sk)
+6 −2
Original line number Diff line number Diff line
@@ -324,7 +324,10 @@ int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	noreclaim_flag = memalloc_noreclaim_save();
	ret = sk->sk_backlog_rcv(sk, skb);
	ret = INDIRECT_CALL_INET(sk->sk_backlog_rcv,
				 tcp_v6_do_rcv,
				 tcp_v4_do_rcv,
				 sk, skb);
	memalloc_noreclaim_restore(noreclaim_flag);

	return ret;
@@ -2449,7 +2452,8 @@ static void sk_enter_memory_pressure(struct sock *sk)
static void sk_leave_memory_pressure(struct sock *sk)
{
	if (sk->sk_prot->leave_memory_pressure) {
		sk->sk_prot->leave_memory_pressure(sk);
		INDIRECT_CALL_INET_1(sk->sk_prot->leave_memory_pressure,
				     tcp_leave_memory_pressure, sk);
	} else {
		unsigned long *memory_pressure = sk->sk_prot->memory_pressure;

+20 −11
Original line number Diff line number Diff line
@@ -286,7 +286,7 @@ EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);
long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);

/* Cacheline-aligned to keep this hot, frequently-written counter from
 * false-sharing a line with neighbouring globals.
 */
atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);

#if IS_ENABLED(CONFIG_SMC)
@@ -301,7 +301,7 @@ DEFINE_STATIC_KEY_FALSE(tcp_have_comp);
/*
 * Current number of TCP sockets.
 */
/* Current number of TCP sockets; cacheline-aligned to avoid false
 * sharing with adjacent hot globals.
 */
struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
@@ -1596,6 +1596,16 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
		tcp_send_ack(sk);
}

/*
 * tcp_eat_recv_skb() - release a fully-consumed receive-queue skb.
 *
 * When the skb still carries the common sock_rfree() destructor, invoke
 * it directly (a direct call instead of an indirect call through
 * skb->destructor) and then clear ->destructor and ->sk so the
 * destructor is not run a second time when sk_eat_skb() frees the skb.
 * The ordering is load-bearing: sock_rfree() must run before the fields
 * are cleared, and both before sk_eat_skb().
 */
static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb->destructor == sock_rfree)) {
		sock_rfree(skb);
		skb->destructor = NULL;
		skb->sk = NULL;
	}
	sk_eat_skb(sk, skb);
}

static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
@@ -1615,7 +1625,7 @@ static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
		 * splitted a fat GRO packet, while we released socket lock
		 * in skb_splice_bits()
		 */
		sk_eat_skb(sk, skb);
		tcp_eat_recv_skb(sk, skb);
	}
	return NULL;
}
@@ -1683,11 +1693,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
				continue;
		}
		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
			sk_eat_skb(sk, skb);
			tcp_eat_recv_skb(sk, skb);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb);
		tcp_eat_recv_skb(sk, skb);
		if (!desc->count)
			break;
		WRITE_ONCE(tp->copied_seq, seq);
@@ -2205,10 +2215,10 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
			break;

		if (copied) {
			if (sk->sk_err ||
			if (!timeo ||
			    sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
@@ -2246,8 +2256,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
			__sk_flush_backlog(sk);
		} else {
			sk_wait_data(sk, &timeo, last);
		}
@@ -2318,14 +2327,14 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
			tcp_eat_recv_skb(sk, skb);
		continue;

found_fin_ok:
		/* Process the FIN. */
		WRITE_ONCE(*seq, *seq + 1);
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
			tcp_eat_recv_skb(sk, skb);
		break;
	} while (len > 0);

+1 −1
Original line number Diff line number Diff line
@@ -123,7 +123,7 @@ EXPORT_SYMBOL(udp_table);
long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

/* Current memory allocated to UDP; cacheline-aligned to avoid false
 * sharing with neighbouring globals.
 */
atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS 65536
Loading