Unverified Commit 64559050 authored by openeuler-ci-bot, committed by Gitee

!7174 CVE-2022-48689

Merge Pull Request from: @ci-robot 
 
PR sync from: Ziyang Xuan <william.xuanziyang@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/ESLPNX6FCN75IO6XQGP47ELMQ2FPHIPN/ 
Patchset of CVE-2022-48689.

Eric Dumazet (1):
  tcp: TX zerocopy should not sense pfmemalloc status

Paolo Abeni (1):
  tcp: factor out tcp_build_frag()

Pavel Begunkov (1):
  net: introduce __skb_fill_page_desc_noacc

Yunsheng Lin (1):
  net: skbuff: update comment about pfmemalloc propagating
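
The crux of the fix: fragments attached from user-owned pages (TX zerocopy, sendpage)
must not propagate the page's pfmemalloc bit into the skb. A minimal illustrative
sketch, not part of the patchset (attach_zerocopy_frag() is a hypothetical caller;
the two fill helpers are the real in-tree ones):

#include <linux/skbuff.h>

/* Hypothetical caller attaching a user-owned page as a TX fragment. */
static void attach_zerocopy_frag(struct sk_buff *skb, int i,
				 struct page *user_page, int off, int len)
{
	/* skb_fill_page_desc() would also run page_is_pfmemalloc() on the
	 * page and could mark the whole skb as pfmemalloc, even though the
	 * kernel never allocated this page from memory reserves.
	 *
	 * skb_fill_page_desc_noacc(), introduced by this series, only
	 * records the fragment and leaves skb->pfmemalloc untouched.
	 */
	skb_fill_page_desc_noacc(skb, i, user_page, off, len);
}

In the hunks below, do_tcp_sendpages() (via tcp_build_frag()) and
__zerocopy_sg_from_iter() are switched to the _noacc variant.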


-- 
2.25.1
 
https://gitee.com/src-openeuler/kernel/issues/I9LK5X 
 
Link: https://gitee.com/openeuler/kernel/pulls/7174

 

Reviewed-by: Yue Haibing <yuehaibing@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
parents 96f54a86 005ea49f
include/linux/skbuff.h +34 −7
@@ -2173,6 +2173,17 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
 	return skb_headlen(skb) + __skb_pagelen(skb);
 }
 
+static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo,
+					      int i, struct page *page,
+					      int off, int size)
+{
+	skb_frag_t *frag = &shinfo->frags[i];
+
+	frag->bv_page		  = page;
+	frag->bv_offset		  = off;
+	skb_frag_size_set(frag, size);
+}
+
 /**
  * __skb_fill_page_desc - initialise a paged fragment in an skb
  * @skb: buffer containing fragment to be initialised
@@ -2189,17 +2200,12 @@ static inline unsigned int skb_pagelen(const struct sk_buff *skb)
 static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
 					struct page *page, int off, int size)
 {
-	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+	__skb_fill_page_desc_noacc(skb_shinfo(skb), i, page, off, size);
 
-	/*
-	 * Propagate page pfmemalloc to the skb if we can. The problem is
+	/* Propagate page pfmemalloc to the skb if we can. The problem is
 	 * that not all callers have unique ownership of the page but rely
 	 * on page_is_pfmemalloc doing the right thing(tm).
 	 */
-	frag->bv_page		  = page;
-	frag->bv_offset		  = off;
-	skb_frag_size_set(frag, size);
-
 	page = compound_head(page);
 	if (page_is_pfmemalloc(page))
 		skb->pfmemalloc	= true;
@@ -2226,6 +2232,27 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
 	skb_shinfo(skb)->nr_frags = i + 1;
 }
 
+/**
+ * skb_fill_page_desc_noacc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+ * @i: paged fragment index to initialise
+ * @page: the page to use for this fragment
+ * @off: the offset to the data with @page
+ * @size: the length of the data
+ *
+ * Variant of skb_fill_page_desc() which does not deal with
+ * pfmemalloc, if page is not owned by us.
+ */
+static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i,
+					    struct page *page, int off,
+					    int size)
+{
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+	__skb_fill_page_desc_noacc(shinfo, i, page, off, size);
+	shinfo->nr_frags = i + 1;
+}
+
 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
 		     int size, unsigned int truesize);

include/net/tcp.h +3 −0
@@ -319,6 +319,7 @@ void tcp_shutdown(struct sock *sk, int how);
 int tcp_v4_early_demux(struct sk_buff *skb);
 int tcp_v4_rcv(struct sk_buff *skb);
 
+void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb);
 int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
@@ -326,6 +327,8 @@ int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
 		 int flags);
 int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
 			size_t size, int flags);
+struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
+			       struct page *page, int offset, size_t *size);
 ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 		 size_t size, int flags);
 int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
net/core/datagram.c +1 −1
@@ -677,7 +677,7 @@ int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
 				page_ref_sub(last_head, refs);
 				refs = 0;
 			}
-			skb_fill_page_desc(skb, frag++, head, start, size);
+			skb_fill_page_desc_noacc(skb, frag++, head, start, size);
 		}
 		if (refs)
 			page_ref_sub(last_head, refs);
net/ipv4/tcp.c +67 −52
@@ -962,7 +962,7 @@ int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
  * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
  * users.
  */
-static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
+void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
 {
 	if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
 		tcp_unlink_write_queue(skb, sk);
@@ -972,55 +972,24 @@ static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
 	}
 }
 
-ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
-			 size_t size, int flags)
+struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
+			       struct page *page, int offset, size_t *size)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-	int mss_now, size_goal;
-	int err;
-	ssize_t copied;
-	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
-
-	if (IS_ENABLED(CONFIG_DEBUG_VM) &&
-	    WARN_ONCE(!sendpage_ok(page),
-		      "page must not be a Slab one and have page_count > 0"))
-		return -EINVAL;
-
-	/* Wait for a connection to finish. One exception is TCP Fast Open
-	 * (passive side) where data is allowed to be sent before a connection
-	 * is fully established.
-	 */
-	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
-	    !tcp_passive_fastopen(sk)) {
-		err = sk_stream_wait_connect(sk, &timeo);
-		if (err != 0)
-			goto out_err;
-	}
-
-	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
-
-	mss_now = tcp_send_mss(sk, &size_goal, flags);
-	copied = 0;
-
-	err = -EPIPE;
-	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
-		goto out_err;
-
-	while (size > 0) {
 	struct sk_buff *skb = tcp_write_queue_tail(sk);
-		int copy, i;
+	struct tcp_sock *tp = tcp_sk(sk);
 	bool can_coalesce;
+	int copy, i;
 
 	if (!skb || (copy = size_goal - skb->len) <= 0 ||
 	    !tcp_skb_can_collapse_to(skb)) {
 new_segment:
 		if (!sk_stream_memory_free(sk))
-				goto wait_for_space;
+			return NULL;
 
 		skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
 					  tcp_rtx_and_write_queues_empty(sk));
 		if (!skb)
-				goto wait_for_space;
+			return NULL;
 
 #ifdef CONFIG_TLS_DEVICE
 		skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
@@ -1029,8 +998,8 @@ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 		copy = size_goal;
 	}
 
-		if (copy > size)
-			copy = size;
+	if (copy > *size)
+		copy = *size;
 
 	i = skb_shinfo(skb)->nr_frags;
 	can_coalesce = skb_can_coalesce(skb, i, page, offset);
@@ -1039,13 +1008,13 @@ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 		goto new_segment;
 	}
 	if (!sk_wmem_schedule(sk, copy))
-			goto wait_for_space;
+		return NULL;
 
 	if (can_coalesce) {
 		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
 	} else {
 		get_page(page);
-			skb_fill_page_desc(skb, i, page, offset, copy);
+		skb_fill_page_desc_noacc(skb, i, page, offset, copy);
 	}
 
 	if (!(flags & MSG_NO_SHARED_FRAGS))
@@ -1061,6 +1030,52 @@ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 	TCP_SKB_CB(skb)->end_seq += copy;
 	tcp_skb_pcount_set(skb, 0);
 
+	*size = copy;
+	return skb;
+}
+
+ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
+			 size_t size, int flags)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int mss_now, size_goal;
+	int err;
+	ssize_t copied;
+	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+
+	if (IS_ENABLED(CONFIG_DEBUG_VM) &&
+	    WARN_ONCE(!sendpage_ok(page),
+		      "page must not be a Slab one and have page_count > 0"))
+		return -EINVAL;
+
+	/* Wait for a connection to finish. One exception is TCP Fast Open
+	 * (passive side) where data is allowed to be sent before a connection
+	 * is fully established.
+	 */
+	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
+	    !tcp_passive_fastopen(sk)) {
+		err = sk_stream_wait_connect(sk, &timeo);
+		if (err != 0)
+			goto out_err;
+	}
+
+	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+	mss_now = tcp_send_mss(sk, &size_goal, flags);
+	copied = 0;
+
+	err = -EPIPE;
+	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+		goto out_err;
+
+	while (size > 0) {
+		struct sk_buff *skb;
+		size_t copy = size;
+
+		skb = tcp_build_frag(sk, size_goal, flags, page, offset, &copy);
+		if (!skb)
+			goto wait_for_space;
+
 		if (!copied)
 			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;