Commit af437584 authored by Wang Yufen's avatar Wang Yufen Committed by Zhengchao Shao
Browse files

bpf, sockmap: Add sk_rmem_alloc check for sockmap

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8KU3B



--------------------------------

When a TCP socket is in a sockmap and the packet transmission rate is
very fast while the packet receiving rate is very slow, a large number
of packets pile up in the ingress queue on the receiving side. As a
result, memory is exhausted and the system OOMs.

To fix this, increase sk_rmem_alloc when an sk_msg is queued in the
ingress queue, decrease sk_rmem_alloc when an sk_msg is dequeued from
the ingress queue, and check sk_rmem_alloc against sk_rcvbuf at the
beginning of bpf_tcp_ingress().

Signed-off-by: default avatarWang Yufen <wangyufen@huawei.com>
Signed-off-by: default avatarZhengchao Shao <shaozhengchao@huawei.com>
parent 16cada85
Loading
Loading
Loading
Loading
+9 −1
Original line number Diff line number Diff line
@@ -444,8 +444,12 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
				if (!msg_rx->skb) {
#if IS_ENABLED(CONFIG_NETACC_TERRACE)
					atomic_sub(copy, &sk->sk_rmem_alloc);
#endif
					sk_mem_uncharge(sk, copy);
				}
				msg_rx->sg.size -= copy;

				if (!sge->length) {
@@ -771,6 +775,10 @@ static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
#if IS_ENABLED(CONFIG_NETACC_TERRACE)
		if (!msg->skb)
			atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
#endif
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
+11 −0
Original line number Diff line number Diff line
@@ -43,6 +43,13 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
		return -ENOMEM;

	lock_sock(sk);
#if IS_ENABLED(CONFIG_NETACC_TERRACE)
	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		kfree(tmp);
		release_sock(sk);
		return -EAGAIN;
	}
#endif
	tmp->sg.start = msg->sg.start;
	i = msg->sg.start;
	do {
@@ -75,6 +82,10 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
	if (!ret) {
		msg->sg.start = i;
		sk_psock_queue_msg(psock, tmp);
#if IS_ENABLED(CONFIG_NETACC_TERRACE)
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			atomic_add(tmp->sg.size, &sk->sk_rmem_alloc);
#endif
		sk_psock_data_ready(sk, psock);
	} else {
		sk_msg_free(sk, tmp);