Commit d5e7d196 authored by David S. Miller
Browse files

Merge branch 'skb_frag_fill_page_desc'

Yunsheng Lin says:

====================
net: introduce skb_frag_fill_page_desc()

Most users use __skb_frag_set_page()/skb_frag_off_set()/
skb_frag_size_set() to fill the page desc for a skb frag.
It does not make much sense to call __skb_frag_set_page()
without calling skb_frag_off_set(), as the offset may depend
on whether the page is a head page or a tail page, so add
skb_frag_fill_page_desc() to fill the page desc for a skb
frag.

In the future, we can make sure the page in the frag is
the head page of a compound page or a base page; if not, we
may warn about that and convert the tail page to a head
page, updating the offset accordingly. If we see a warning
about that, we also fix the caller to fill the head page
in the frag. When the fixing is done, we may remove the
warning and the conversion.

In this way, we can remove the compound_head() or use
page_ref_*() like the below case:
https://elixir.bootlin.com/linux/latest/source/net/core/page_pool.c#L881
https://elixir.bootlin.com/linux/latest/source/include/linux/skbuff.h#L3383



It may also convert net stack to use the folio easier.

V1: repost with all the ack/review tags included.
RFC: remove a local variable as pointed out by Simon.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 305c0418 278fda0d
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -532,10 +532,10 @@ static bool aq_add_rx_fragment(struct device *dev,
					      buff_->rxdata.pg_off,
					      buff_->len,
					      DMA_FROM_DEVICE);
		skb_frag_off_set(frag, buff_->rxdata.pg_off);
		skb_frag_size_set(frag, buff_->len);
		sinfo->xdp_frags_size += buff_->len;
		__skb_frag_set_page(frag, buff_->rxdata.page);
		skb_frag_fill_page_desc(frag, buff_->rxdata.page,
					buff_->rxdata.pg_off,
					buff_->len);

		buff_->is_cleaned = 1;

+0 −1
Original line number Diff line number Diff line
@@ -2955,7 +2955,6 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = skb_frag_page(&shinfo->frags[shinfo->nr_frags]);
		__skb_frag_set_page(&shinfo->frags[shinfo->nr_frags], NULL);

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
+3 −7
Original line number Diff line number Diff line
@@ -1085,9 +1085,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_frag_off_set(frag, cons_rx_buf->offset);
		skb_frag_size_set(frag, frag_len);
		__skb_frag_set_page(frag, cons_rx_buf->page);
		skb_frag_fill_page_desc(frag, cons_rx_buf->page,
					cons_rx_buf->offset, frag_len);
		shinfo->nr_frags = i + 1;
		__clear_bit(cons, rxr->rx_agg_bmap);

@@ -1103,10 +1102,7 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
			xdp_buff_set_frag_pfmemalloc(xdp);

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			unsigned int nr_frags;

			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
			--shinfo->nr_frags;
			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
+2 −3
Original line number Diff line number Diff line
@@ -2184,9 +2184,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
	len -= offset;

	rx_frag += nr_frags;
	__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
	skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset);
	skb_frag_size_set(rx_frag, len);
	skb_frag_fill_page_desc(rx_frag, sd->pg_chunk.page,
				sd->pg_chunk.offset + offset, len);

	skb->len += len;
	skb->data_len += len;
+17 −15
Original line number Diff line number Diff line
@@ -2343,10 +2343,9 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_frag_off_set(&skb_shinfo(skb)->frags[0],
				 page_info->page_offset + hdr_len);
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
		skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[0],
					page_info->page,
					page_info->page_offset + hdr_len,
					curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
@@ -2369,16 +2368,17 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_frag_off_set(&skb_shinfo(skb)->frags[j],
					 page_info->page_offset);
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[j],
						page_info->page,
						page_info->page_offset,
						curr_frag_len);
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
			skb_frag_size_add(&skb_shinfo(skb)->frags[j],
					  curr_frag_len);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
@@ -2451,14 +2451,16 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_frag_off_set(&skb_shinfo(skb)->frags[j],
					 page_info->page_offset);
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[j],
						page_info->page,
						page_info->page_offset,
						curr_frag_len);
		} else {
			put_page(page_info->page);
			skb_frag_size_add(&skb_shinfo(skb)->frags[j],
					  curr_frag_len);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);

		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
Loading