Commit 4ef3960e authored by Jakub Kicinski

Merge branch 'add-frag-page-support-in-page-pool'

Yunsheng Lin says:

====================
add frag page support in page pool

This patchset adds frag page support in page pool and
enables skb's page frag recycling based on page pool in
the hns3 driver.
====================

Link: https://lore.kernel.org/r/1628217982-53533-1-git-send-email-linyunsheng@huawei.com


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 2a2b6e36 93188e96
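
Context for the hunks below: with PP_FLAG_PAGE_FRAG, several receive
buffers can share one page, each descriptor pointing at its own fragment.
A minimal sketch of the consumer side, assuming hypothetical names
(my_rx_ring, MY_BUF_SIZE, my_rx_refill); only the page_pool calls are
the real API this series adds or uses:

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/numa.h>
#include <net/page_pool.h>

#define MY_BUF_SIZE	2048	/* assumed per-descriptor buffer size */

struct my_rx_ring {
	struct page_pool *pool;
};

static int my_rx_ring_init(struct my_rx_ring *ring, struct device *dev)
{
	struct page_pool_params pp_params = {
		/* PP_FLAG_PAGE_FRAG enables the new frag allocator */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG,
		.order = 0,
		.pool_size = 256,
		.nid = NUMA_NO_NODE,
		.dev = dev,
		.dma_dir = DMA_FROM_DEVICE,
	};

	ring->pool = page_pool_create(&pp_params);
	return PTR_ERR_OR_ZERO(ring->pool);
}

/* Carve one MY_BUF_SIZE fragment out of a (possibly shared) pool page;
 * consecutive calls may return the same page at different offsets.
 */
static int my_rx_refill(struct my_rx_ring *ring, dma_addr_t *dma)
{
	unsigned int offset;
	struct page *p;

	p = page_pool_dev_alloc_frag(ring->pool, &offset, MY_BUF_SIZE);
	if (!p)
		return -ENOMEM;

	/* PP_FLAG_DMA_MAP means the pool already mapped the page */
	*dma = page_pool_get_dma_addr(p) + offset;
	return 0;
}

On the free side a driver either lets the skb recycle the fragment via
skb_mark_for_recycle() or hands it back with page_pool_put_full_page(),
which is exactly what the hns3 hunks below do.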
drivers/net/ethernet/hisilicon/hns3/Kconfig  +1 −0
@@ -91,6 +91,7 @@ config HNS3
	tristate "Hisilicon Network Subsystem Support HNS3 (Framework)"
	depends on PCI
	select NET_DEVLINK
	select PAGE_POOL
	help
	  This selects the framework support for Hisilicon Network Subsystem 3.
	  This layer facilitates clients like ENET, RoCE and user-space ethernet
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c  +74 −5
@@ -3205,6 +3205,21 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
 	unsigned int order = hns3_page_order(ring);
 	struct page *p;
 
+	if (ring->page_pool) {
+		p = page_pool_dev_alloc_frag(ring->page_pool,
+					     &cb->page_offset,
+					     hns3_buf_size(ring));
+		if (unlikely(!p))
+			return -ENOMEM;
+
+		cb->priv = p;
+		cb->buf = page_address(p);
+		cb->dma = page_pool_get_dma_addr(p);
+		cb->type = DESC_TYPE_PP_FRAG;
+		cb->reuse_flag = 0;
+		return 0;
+	}
+
 	p = dev_alloc_pages(order);
 	if (!p)
 		return -ENOMEM;
@@ -3227,8 +3242,13 @@ static void hns3_free_buffer(struct hns3_enet_ring *ring,
 	if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD |
 			DESC_TYPE_BOUNCE_ALL | DESC_TYPE_SGL_SKB))
 		napi_consume_skb(cb->priv, budget);
-	else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias)
-		__page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
+	else if (!HNAE3_IS_TX_RING(ring)) {
+		if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias)
+			__page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
+		else if (cb->type & DESC_TYPE_PP_FRAG)
+			page_pool_put_full_page(ring->page_pool, cb->priv,
+						false);
+	}
 	memset(cb, 0, sizeof(*cb));
 }

@@ -3315,7 +3335,7 @@ static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring,
 	int ret;
 
 	ret = hns3_alloc_buffer(ring, cb);
-	if (ret)
+	if (ret || ring->page_pool)
 		goto out;
 
 	ret = hns3_map_buffer(ring, cb);
@@ -3337,7 +3357,8 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
 	if (ret)
 		return ret;
 
-	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
+					 ring->desc_cb[i].page_offset);
 
 	return 0;
 }
@@ -3367,7 +3388,8 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
 {
 	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
 	ring->desc_cb[i] = *res_cb;
-	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
+					 ring->desc_cb[i].page_offset);
 	ring->desc[i].rx.bd_base_info = 0;
 }

@@ -3539,6 +3561,12 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
 	u32 frag_size = size - pull_len;
 	bool reused;
 
+	if (ring->page_pool) {
+		skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset,
+				frag_size, truesize);
+		return;
+	}
+
 	/* Avoid re-using remote or pfmem page */
 	if (unlikely(!dev_page_is_reusable(desc_cb->priv)))
 		goto out;
@@ -3856,6 +3884,9 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
 		/* We can reuse buffer as-is, just make sure it is reusable */
 		if (dev_page_is_reusable(desc_cb->priv))
 			desc_cb->reuse_flag = 1;
+		else if (desc_cb->type & DESC_TYPE_PP_FRAG)
+			page_pool_put_full_page(ring->page_pool, desc_cb->priv,
+						false);
 		else /* This page cannot be reused so discard it */
 			__page_frag_cache_drain(desc_cb->priv,
 						desc_cb->pagecnt_bias);
@@ -3863,6 +3894,10 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
 		hns3_rx_ring_move_fw(ring);
 		return 0;
 	}
+
+	if (ring->page_pool)
+		skb_mark_for_recycle(skb);
+
 	u64_stats_update_begin(&ring->syncp);
 	ring->stats.seg_pkt_cnt++;
 	u64_stats_update_end(&ring->syncp);
@@ -3901,6 +3936,10 @@ static int hns3_add_frag(struct hns3_enet_ring *ring)
					    "alloc rx fraglist skb fail\n");
				return -ENXIO;
			}

			if (ring->page_pool)
				skb_mark_for_recycle(new_skb);

			ring->frag_num = 0;

			if (ring->tail_skb) {
@@ -4705,6 +4744,29 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv)
 	priv->ring = NULL;
 }
 
+static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
+{
+	struct page_pool_params pp_params = {
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG |
+				PP_FLAG_DMA_SYNC_DEV,
+		.order = hns3_page_order(ring),
+		.pool_size = ring->desc_num * hns3_buf_size(ring) /
+				(PAGE_SIZE << hns3_page_order(ring)),
+		.nid = dev_to_node(ring_to_dev(ring)),
+		.dev = ring_to_dev(ring),
+		.dma_dir = DMA_FROM_DEVICE,
+		.offset = 0,
+		.max_len = PAGE_SIZE << hns3_page_order(ring),
+	};
+
+	ring->page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(ring->page_pool)) {
+		dev_warn(ring_to_dev(ring), "page pool creation failed: %ld\n",
+			 PTR_ERR(ring->page_pool));
+		ring->page_pool = NULL;
+	}
+}
+
 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
 {
 	int ret;
@@ -4724,6 +4786,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
 		goto out_with_desc_cb;
 
 	if (!HNAE3_IS_TX_RING(ring)) {
+		hns3_alloc_page_pool(ring);
+
 		ret = hns3_alloc_ring_buffers(ring);
 		if (ret)
 			goto out_with_desc;
@@ -4764,6 +4828,11 @@ void hns3_fini_ring(struct hns3_enet_ring *ring)
 		devm_kfree(ring_to_dev(ring), tx_spare);
 		ring->tx_spare = NULL;
 	}
+
+	if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) {
+		page_pool_destroy(ring->page_pool);
+		ring->page_pool = NULL;
+	}
 }
 
 static int hns3_buf_size2type(u32 buf_size)
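
A note on the pool_size arithmetic in hns3_alloc_page_pool() above: it
provisions just enough whole pages to back every descriptor once. With
hypothetical numbers, 1024 descriptors of 2048 bytes on order-0 4 KiB
pages gives 1024 * 2048 / (4096 << 0) = 512 pages, i.e. two fragments
per page.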
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h  +3 −0
@@ -6,6 +6,7 @@

 #include <linux/dim.h>
 #include <linux/if_vlan.h>
+#include <net/page_pool.h>
 
 #include "hnae3.h"

@@ -307,6 +308,7 @@ enum hns3_desc_type {
 	DESC_TYPE_BOUNCE_ALL		= 1 << 3,
 	DESC_TYPE_BOUNCE_HEAD		= 1 << 4,
 	DESC_TYPE_SGL_SKB		= 1 << 5,
+	DESC_TYPE_PP_FRAG		= 1 << 6,
 };
 
 struct hns3_desc_cb {
@@ -451,6 +453,7 @@ struct hns3_enet_ring {
 	struct hnae3_queue *tqp;
 	int queue_index;
 	struct device *dev; /* will be used for DMA mapping of descriptors */
+	struct page_pool *page_pool;
 
 	/* statistic */
 	struct ring_stats stats;
drivers/net/ethernet/marvell/mvneta.c  +1 −5
@@ -2327,7 +2327,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 
-	skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool);
+	skb_mark_for_recycle(skb);
 
 	skb_reserve(skb, xdp->data - xdp->data_hard_start);
 	skb_put(skb, xdp->data_end - xdp->data);
@@ -2339,10 +2339,6 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 				skb_frag_page(frag), skb_frag_off(frag),
 				skb_frag_size(frag), PAGE_SIZE);
-		/* We don't need to reset pp_recycle here. It's already set, so
-		 * just mark fragments for recycling.
-		 */
-		page_pool_store_mem_info(skb_frag_page(frag), pool);
 	}
 
 	return skb;
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c  +1 −1
@@ -3995,7 +3995,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
 		}
 
 		if (pp)
-			skb_mark_for_recycle(skb, page, pp);
+			skb_mark_for_recycle(skb);
 		else
 			dma_unmap_single_attrs(dev->dev.parent, dma_addr,
 					       bm_pool->buf_size, DMA_FROM_DEVICE,
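
The two Marvell hunks above are fallout from the first patch in the
series: page pool ownership information now stays in struct page for as
long as the pool owns the page, so skb_mark_for_recycle() can drop its
page and pool arguments and the per-fragment page_pool_store_mem_info()
helper goes away.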