Commit 947c54c3 authored by Shay Agroskin, committed by David S. Miller

net: ena: Use dev_alloc() in RX buffer allocation

Use dev_alloc() when allocating RX buffers instead of specifying the
allocation flags explicitly. This results in the same behaviour with
less code.
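
For reference, the substitution is roughly the following. This is a
sketch only: example_rx_page_alloc() is a hypothetical name, and the
flag equivalence is quoted from the __dev_alloc_pages() helpers in
include/linux/skbuff.h around the time of this commit, not from this
patch itself.

#include <linux/gfp.h>
#include <linux/skbuff.h>

/* Both variants request an atomic, compound page; dev_alloc_page()
 * additionally ORs in __GFP_NOWARN and __GFP_MEMALLOC, and passes
 * NUMA_NO_NODE, which falls back to the node the code runs on.
 */
static struct page *example_rx_page_alloc(void)
{
	/* before: page = alloc_page(GFP_ATOMIC | __GFP_COMP); */
	return dev_alloc_page();
}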

Also move the page allocation and its DMA mapping into a function. This
creates a logical block, which may make the code easier to understand.
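
The extracted helper can only report both a page pointer and an error
code through one return value by encoding the error in the pointer,
hence the ERR_PTR()/IS_ERR()/PTR_ERR() idiom in the diff below. A
minimal sketch of that calling convention (the example_alloc() and
example_caller() names are hypothetical, not taken from the driver):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>

/* Helper: returns a valid pointer on success, an errno encoded
 * via ERR_PTR() on failure.
 */
static struct page *example_alloc(void)
{
	struct page *page = dev_alloc_page();

	if (!page)
		return ERR_PTR(-ENOMEM);
	return page;
}

/* Caller: recovers the errno without a separate status out-parameter */
static int example_caller(void)
{
	struct page *page = example_alloc();

	if (IS_ERR(page))
		return PTR_ERR(page);
	__free_page(page);
	return 0;
}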

Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9e8afb05
drivers/net/ethernet/amazon/ena/ena_netdev.c: +36 −22
@@ -975,41 +975,56 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
 		ena_free_rx_resources(adapter, i);
 }
 
-static int ena_alloc_rx_page(struct ena_ring *rx_ring,
-				    struct ena_rx_buffer *rx_info, gfp_t gfp)
+struct page *ena_alloc_map_page(struct ena_ring *rx_ring, dma_addr_t *dma)
 {
-	int headroom = rx_ring->rx_headroom;
-	struct ena_com_buf *ena_buf;
 	struct page *page;
-	dma_addr_t dma;
-	int tailroom;
-
-	/* restore page offset value in case it has been changed by device */
-	rx_info->page_offset = headroom;
-
-	/* if previous allocated page is not used */
-	if (unlikely(rx_info->page))
-		return 0;
 
-	page = alloc_page(gfp);
-	if (unlikely(!page)) {
+	/* This would allocate the page on the same NUMA node the executing code
+	 * is running on.
+	 */
+	page = dev_alloc_page();
+	if (!page) {
 		ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
 				  &rx_ring->syncp);
-		return -ENOMEM;
+		return ERR_PTR(-ENOSPC);
 	}
 
 	/* To enable NIC-side port-mirroring, AKA SPAN port,
 	 * we make the buffer readable from the nic as well
 	 */
-	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
-			   DMA_BIDIRECTIONAL);
-	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
+	*dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
+			    DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) {
 		ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
 				  &rx_ring->syncp);
 
 		__free_page(page);
-		return -EIO;
+		return ERR_PTR(-EIO);
 	}
 
+	return page;
+}
+
+static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
+			       struct ena_rx_buffer *rx_info)
+{
+	int headroom = rx_ring->rx_headroom;
+	struct ena_com_buf *ena_buf;
+	struct page *page;
+	dma_addr_t dma;
+	int tailroom;
+
+	/* restore page offset value in case it has been changed by device */
+	rx_info->page_offset = headroom;
+
+	/* if previous allocated page is not used */
+	if (unlikely(rx_info->page))
+		return 0;
+
+	/* We handle DMA here */
+	page = ena_alloc_map_page(rx_ring, &dma);
+	if (unlikely(IS_ERR(page)))
+		return PTR_ERR(page);
+
 	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
 		  "Allocate page %p, rx_info %p\n", page, rx_info);
 
@@ -1065,8 +1080,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
 
 		rx_info = &rx_ring->rx_buffer_info[req_id];
 
-		rc = ena_alloc_rx_page(rx_ring, rx_info,
-				       GFP_ATOMIC | __GFP_COMP);
+		rc = ena_alloc_rx_buffer(rx_ring, rx_info);
 		if (unlikely(rc < 0)) {
 			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
 				   "Failed to allocate buffer for rx queue %d\n",