Commit 9513fa75 authored by Barry Song, committed by Yang Yingliang
Browse files

net: hns3: streaming dma buffer sync between cpu and device



mainline inclusion
from mainline-v5.9-rc1
commit c2a2e127
category: feature
bugzilla: NA
CVE: NA

----------------------------

Right now they are empty functions for our SoC since hardware can keep
cache coherent, but it is still good to align with streaming DMA APIs
as device drivers should not make an assumption of SoC.

Reviewed-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Barry Song <song.bao.hua@hisilicon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Yonglong Liu <liuyonglong@huawei.com>
Reviewed-by: li yongxin <liyongxin1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
parent c617a7aa
Loading
Loading
Loading
Loading
+17 −1
Original line number Original line Diff line number Diff line
@@ -2555,6 +2555,11 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
					 ring->desc_cb[i].page_offset);
					 ring->desc_cb[i].page_offset);
	ring->desc[i].rx.bd_base_info = 0;
	ring->desc[i].rx.bd_base_info = 0;

	dma_sync_single_for_device(ring_to_dev(ring),
			ring->desc_cb[i].dma + ring->desc_cb[i].page_offset,
			hns3_buf_size(ring),
			DMA_FROM_DEVICE);
}
}


static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
@@ -3000,6 +3005,11 @@ static int hns3_add_frag(struct hns3_enet_ring *ring)
			skb = ring->tail_skb;
			skb = ring->tail_skb;
		}
		}


		dma_sync_single_for_cpu(ring_to_dev(ring),
				desc_cb->dma + desc_cb->page_offset,
				hns3_buf_size(ring),
				DMA_FROM_DEVICE);

		hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
		hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
		trace_hns3_rx_desc(ring);
		trace_hns3_rx_desc(ring);
		ring_ptr_move_fw(ring, next_to_clean);
		ring_ptr_move_fw(ring, next_to_clean);
@@ -3151,9 +3161,15 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
	if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
	if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
		return -ENXIO;
		return -ENXIO;


	if (!skb)
	if (!skb) {
		ring->va = desc_cb->buf + desc_cb->page_offset;
		ring->va = desc_cb->buf + desc_cb->page_offset;


		dma_sync_single_for_cpu(ring_to_dev(ring),
				desc_cb->dma + desc_cb->page_offset,
				hns3_buf_size(ring),
				DMA_FROM_DEVICE);
	}

	/* Prefetch first cache line of first page
	/* Prefetch first cache line of first page
	 * Idea is to cache few bytes of the header of the packet. Our L1 Cache
	 * Idea is to cache few bytes of the header of the packet. Our L1 Cache
	 * line size is 64B so need to prefetch twice to make it 128B. But in
	 * line size is 64B so need to prefetch twice to make it 128B. But in