Commit e74a726d authored by Hao Chen, committed by David S. Miller
Browse files

net: hns3: refactor hns3_nic_reuse_page()



Split the rx copybreak handling out of hns3_nic_reuse_page() into a
separate function to improve code simplicity.

Signed-off-by: Hao Chen <chenhao288@hisilicon.com>
Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ed0e658c
Loading
Loading
Loading
Loading
+35 −20
Original line number Diff line number Diff line
@@ -3546,6 +3546,38 @@ static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
	return page_count(cb->priv) == cb->pagecnt_bias;
}

/* Copybreak path for small rx frames: copy the payload out of the DMA
 * buffer into a freshly allocated page frag and attach that frag to the
 * skb, so the original rx buffer can be flagged for immediate reuse.
 *
 * Returns 0 on success or -ENOMEM if the frag allocation fails (the
 * caller then falls back to the normal page-flipping path).
 */
static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
				    struct hns3_enet_ring *ring,
				    int pull_len,
				    struct hns3_desc_cb *desc_cb)
{
	struct hns3_desc *rx_desc = &ring->desc[ring->next_to_clean];
	int rx_len = le16_to_cpu(rx_desc->rx.size);
	u32 copy_len = rx_len - pull_len;
	u32 copy_off = desc_cb->page_offset + pull_len;
	void *dst;

	dst = napi_alloc_frag(copy_len);
	if (unlikely(!dst)) {
		/* Account the allocation failure under the stats seqlock. */
		u64_stats_update_begin(&ring->syncp);
		ring->stats.frag_alloc_err++;
		u64_stats_update_end(&ring->syncp);

		hns3_rl_err(ring_to_netdev(ring),
			    "failed to allocate rx frag\n");
		return -ENOMEM;
	}

	/* The data was copied out, so the rx buffer can be recycled. */
	desc_cb->reuse_flag = 1;
	memcpy(dst, desc_cb->buf + copy_off, copy_len);
	skb_add_rx_frag(skb, i, virt_to_page(dst),
			offset_in_page(dst), copy_len, copy_len);

	u64_stats_update_begin(&ring->syncp);
	ring->stats.frag_alloc++;
	u64_stats_update_end(&ring->syncp);

	return 0;
}

static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
				struct hns3_enet_ring *ring, int pull_len,
				struct hns3_desc_cb *desc_cb)
@@ -3555,6 +3587,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
	int size = le16_to_cpu(desc->rx.size);
	u32 truesize = hns3_buf_size(ring);
	u32 frag_size = size - pull_len;
	int ret = 0;
	bool reused;

	if (ring->page_pool) {
@@ -3589,29 +3622,11 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
		desc_cb->page_offset = 0;
		desc_cb->reuse_flag = 1;
	} else if (frag_size <= ring->rx_copybreak) {
		void *frag = napi_alloc_frag(frag_size);

		if (unlikely(!frag)) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.frag_alloc_err++;
			u64_stats_update_end(&ring->syncp);

			hns3_rl_err(ring_to_netdev(ring),
				    "failed to allocate rx frag\n");
		ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb);
		if (ret)
			goto out;
	}

		desc_cb->reuse_flag = 1;
		memcpy(frag, desc_cb->buf + frag_offset, frag_size);
		skb_add_rx_frag(skb, i, virt_to_page(frag),
				offset_in_page(frag), frag_size, frag_size);

		u64_stats_update_begin(&ring->syncp);
		ring->stats.frag_alloc++;
		u64_stats_update_end(&ring->syncp);
		return;
	}

out:
	desc_cb->pagecnt_bias--;