Commit 8818e269 authored by Wang Yufen's avatar Wang Yufen Committed by Zheng Zengkai
Browse files

bpf, sockmap: Add sk_rmem_alloc check for sockmap

hulk inclusion
category: feature
bugzilla: 186640, https://gitee.com/openeuler/kernel/issues/I545NW



--------------------------------

Consider a TCP socket in a sockmap. If the packet transmission rate is
very fast and the packet receiving rate is very slow, a large number of
packets are stacked in the ingress queue on the receiving side. As a
result, memory is exhausted and the system OOMs.

To fix this, increase sk_rmem_alloc when an sk_msg is queued in the
ingress queue, decrease sk_rmem_alloc when an sk_msg is dequeued from
the ingress queue, and check sk_rmem_alloc against sk_rcvbuf at the
beginning of bpf_tcp_ingress().

Signed-off-by: default avatarWang Yufen <wangyufen@huawei.com>
Reviewed-by: default avatarLiu Jian <liujian56@huawei.com>
Reviewed-by: default avatarWei Yongjun <weiyongjun1@huawei.com>
Reviewed-by: default avatarYue Haibing <yuehaibing@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parent 8207c85b
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -659,6 +659,7 @@ void __sk_psock_purge_ingress_msg(struct sk_psock *psock)

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
+9 −1
Original line number Diff line number Diff line
@@ -43,8 +43,10 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
				if (!msg_rx->skb) {
					atomic_sub(copy, &sk->sk_rmem_alloc);
					sk_mem_uncharge(sk, copy);
				}
				msg_rx->sg.size -= copy;

				if (!sge->length) {
@@ -98,6 +100,11 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
		return -ENOMEM;

	lock_sock(sk);
	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		kfree(tmp);
		release_sock(sk);
		return -EAGAIN;
	}
	tmp->sg.start = msg->sg.start;
	i = msg->sg.start;
	do {
@@ -127,6 +134,7 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
	if (!ret) {
		msg->sg.start = i;
		sk_psock_queue_msg(psock, tmp);
		atomic_add(tmp->sg.size, &sk->sk_rmem_alloc);
		sk_psock_data_ready(sk, psock);
	} else {
		sk_msg_free(sk, tmp);