Commit dc8cf755 authored by David S. Miller
Browse files

Merge branch 'page_pool-recycling'



Matteo Croce says:

====================
page_pool: recycle buffers

This is a respin of [1]

This patchset shows the plans for allowing page_pool to handle and
maintain DMA map/unmap of the pages it serves to the driver. For this
to work a return hook in the network core is introduced.

The overall purpose is to simplify drivers, by providing a page
allocation API that does recycling, such that each driver doesn't have
to reinvent its own recycling scheme. Using page_pool in a driver
does not require implementing XDP support, but it makes it trivially
easy to do so. Instead of allocating buffers specifically for SKBs
we now allocate a generic buffer and either wrap it in an SKB
(via build_skb) or create an XDP frame.
The recycling code leverages the XDP recycle APIs.

The Marvell mvpp2 and mvneta drivers are used in this patchset to
demonstrate how to use the API, and tested on a MacchiatoBIN
and EspressoBIN boards respectively.

Please let this go in on a future -rc1 so as to allow enough time
for wider testing.

v7 -> v8:
- use page->lru.next instead of page->index for pfmemalloc
- remove conditional include
- rework page_pool_return_skb_page() so to have less conversions
  between page and addresses, and call compound_head() only once
- move some code from skb_free_head() to a new helper skb_pp_recycle()
- misc fixes

v6 -> v7:
- refresh patches against net-next
- remove a redundant call to virt_to_head_page()
- update mvneta benchmarks

v5 -> v6:
- preserve pfmemalloc bit when setting signature
- fix typo in mvneta
- rebase on net-next with the new cache
- don't clear the skb->pp_recycle in pskb_expand_head()

v4 -> v5:
- move the signature so it doesn't alias with page->mapping
- use an invalid pointer as magic
- incorporate Matthew Wilcox's changes for pfmemalloc pages
- move the __skb_frag_unref() changes to a preliminary patch
- refactor some cpp directives
- only attempt recycling if skb->head_frag
- clear skb->pp_recycle in pskb_expand_head()

v3 -> v4:
- store a pointer to page_pool instead of xdp_mem_info
- drop a patch which reduces xdp_mem_info size
- do the recycling in the page_pool code instead of xdp_return
- remove some unused headers include
- remove some useless forward declaration

v2 -> v3:
- added missing SOBs
- CCed the MM people

v1 -> v2:
- fix a commit message
- avoid setting pp_recycle multiple times on mvneta
- squash two patches to avoid breaking bisect
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 35cba15a e4017570
Loading
Loading
Loading
Loading
+7 −4
Original line number Diff line number Diff line
@@ -2320,7 +2320,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
}

static struct sk_buff *
mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
		      struct xdp_buff *xdp, u32 desc_status)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
@@ -2331,7 +2331,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
	if (!skb)
		return ERR_PTR(-ENOMEM);

	page_pool_release_page(rxq->page_pool, virt_to_page(xdp->data));
	skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool);

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);
@@ -2343,7 +2343,10 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				skb_frag_page(frag), skb_frag_off(frag),
				skb_frag_size(frag), PAGE_SIZE);
		page_pool_release_page(rxq->page_pool, skb_frag_page(frag));
		/* We don't need to reset pp_recycle here. It's already set, so
		 * just mark fragments for recycling.
		 */
		page_pool_store_mem_info(skb_frag_page(frag), pool);
	}

	return skb;
@@ -2425,7 +2428,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
		    mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
			goto next;

		skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status);
		skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
		if (IS_ERR(skb)) {
			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

+1 −1
Original line number Diff line number Diff line
@@ -3997,7 +3997,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		}

		if (pp)
			page_pool_release_page(pp, virt_to_page(data));
			skb_mark_for_recycle(skb, virt_to_page(data), pp);
		else
			dma_unmap_single_attrs(dev->dev.parent, dma_addr,
					       bm_pool->buf_size, DMA_FROM_DEVICE,
+1 −1
Original line number Diff line number Diff line
@@ -2503,7 +2503,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,

		if (length == 0) {
			/* don't need this page */
			__skb_frag_unref(frag);
			__skb_frag_unref(frag, false);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);
+1 −1
Original line number Diff line number Diff line
@@ -526,7 +526,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(skb_shinfo(skb)->frags + nr);
		__skb_frag_unref(skb_shinfo(skb)->frags + nr, false);
	}
	return 0;
}
+6 −5
Original line number Diff line number Diff line
@@ -1668,10 +1668,11 @@ struct address_space *page_mapping(struct page *page);
static inline bool page_is_pfmemalloc(const struct page *page)
{
	/*
	 * Page index cannot be this large so this must be
	 * a pfmemalloc page.
	 * lru.next has bit 1 set if the page is allocated from the
	 * pfmemalloc reserves.  Callers may simply overwrite it if
	 * they do not need to preserve that information.
	 */
	return page->index == -1UL;
	return (uintptr_t)page->lru.next & BIT(1);
}

/*
@@ -1680,12 +1681,12 @@ static inline bool page_is_pfmemalloc(const struct page *page)
 */
static inline void set_page_pfmemalloc(struct page *page)
{
	page->index = -1UL;
	page->lru.next = (void *)BIT(1);
}

static inline void clear_page_pfmemalloc(struct page *page)
{
	page->index = 0;
	page->lru.next = NULL;
}

/*
Loading