Commit da5ec7f2 authored by Ong Boon Leong, committed by David S. Miller

net: stmmac: refactor stmmac_init_rx_buffers for stmmac_reinit_rx_buffers



The per-queue RX buffer allocation in stmmac_reinit_rx_buffers() can be
made to use stmmac_alloc_rx_buffers() by merging the page_pool alloc
checks for "buf->page" and "buf->sec_page" in stmmac_init_rx_buffers().
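
The merged checks make stmmac_init_rx_buffers() idempotent: a buffer that already holds pages is left untouched, so the same routine can safely run over a ring that is only partially populated. A minimal userspace sketch of that pattern (page_pool_dev_alloc_pages() stood in by malloc(), the stmmac structures reduced to the two fields involved; names here are illustrative, not the driver's):

```c
#include <stdlib.h>

/* Reduced stand-in for struct stmmac_rx_buffer: just the two pages
 * whose allocation checks this commit merges. */
struct rx_buffer {
	void *page;     /* primary receive page */
	void *sec_page; /* split-header (SPH) page, only when enabled */
};

/* Idempotent per-buffer init: allocate only what is still missing,
 * so first-time init and re-init can share one code path. */
static int init_rx_buffer(struct rx_buffer *buf, int sph)
{
	if (!buf->page) {
		buf->page = malloc(4096); /* page_pool alloc in the driver */
		if (!buf->page)
			return -1;        /* -ENOMEM in the driver */
	}

	if (sph && !buf->sec_page) {
		buf->sec_page = malloc(4096);
		if (!buf->sec_page)
			return -1;
	}

	return 0;
}
```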

This is in preparation for XSK pool allocation later.
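
With the per-buffer init idempotent, the reinit path collapses to a loop over queues calling a shared per-queue allocator, which is the shape the diff below gives stmmac_reinit_rx_buffers() via stmmac_alloc_rx_buffers(). Continuing the sketch above (ring size and queue count are illustrative):

```c
#define RING_SIZE 512

/* Per-queue allocator: walk the ring, topping up any missing pages.
 * Mirrors the role of stmmac_alloc_rx_buffers() in the driver. */
static int alloc_rx_buffers(struct rx_buffer ring[RING_SIZE], int sph)
{
	for (int i = 0; i < RING_SIZE; i++) {
		if (init_rx_buffer(&ring[i], sph) < 0)
			return -1;
	}
	return 0;
}

/* Reinit becomes a thin loop over queues; unwinding already-filled
 * queues on failure stays in the caller, as in the driver. */
static int reinit_rx_buffers(struct rx_buffer queues[][RING_SIZE],
			     int num_queues, int sph)
{
	for (int q = 0; q < num_queues; q++) {
		if (alloc_rx_buffers(queues[q], sph) < 0)
			return -1;
	}
	return 0;
}
```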

Signed-off-by: Ong Boon Leong <boon.leong.ong@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 80f573c9
+189 −189
@@ -1388,12 +1388,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
 
+	if (!buf->page) {
 		buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
 		if (!buf->page)
 			return -ENOMEM;
 		buf->page_offset = stmmac_rx_offset(priv);
+	}
 
-	if (priv->sph) {
+	if (priv->sph && !buf->sec_page) {
 		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
 		if (!buf->sec_page)
 			return -ENOMEM;
@@ -1547,48 +1549,16 @@ static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
 {
 	u32 rx_count = priv->plat->rx_queues_to_use;
 	u32 queue;
-	int i;
 
 	for (queue = 0; queue < rx_count; queue++)
 		dma_recycle_rx_skbufs(priv, queue);
 
 	for (queue = 0; queue < rx_count; queue++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
-		for (i = 0; i < priv->dma_rx_size; i++) {
-			struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-			struct dma_desc *p;
-
-			if (priv->extend_desc)
-				p = &((rx_q->dma_erx + i)->basic);
-			else
-				p = rx_q->dma_rx + i;
-
-			if (!buf->page) {
-				buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
-				if (!buf->page)
-					goto err_reinit_rx_buffers;
-
-				buf->addr = page_pool_get_dma_addr(buf->page) +
-					    buf->page_offset;
-			}
+		int ret;
 
-			if (priv->sph && !buf->sec_page) {
-				buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
-				if (!buf->sec_page)
+		ret = stmmac_alloc_rx_buffers(priv, queue, GFP_KERNEL);
+		if (ret < 0)
 			goto err_reinit_rx_buffers;
-
-				buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
-			}
-
-			stmmac_set_desc_addr(priv, p, buf->addr);
-			if (priv->sph)
-				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
-			else
-				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
-			if (priv->dma_buf_sz == BUF_SIZE_16KiB)
-				stmmac_init_desc3(priv, p);
-		}
 	}
 
 	return;
@@ -1791,16 +1761,12 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
 }
 
 /**
- * free_dma_rx_desc_resources - free RX dma desc resources
+ * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
  * @priv: private structure
+ * @queue: RX queue index
  */
-static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
 {
-	u32 rx_count = priv->plat->rx_queues_to_use;
-	u32 queue;
-
-	/* Free RX queue resources */
-	for (queue = 0; queue < rx_count; queue++) {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 
 	/* Release the DMA RX socket buffers */
@@ -1823,19 +1789,24 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
 	if (rx_q->page_pool)
 		page_pool_destroy(rx_q->page_pool);
-	}
 }
 
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+
+	/* Free RX queue resources */
+	for (queue = 0; queue < rx_count; queue++)
+		__free_dma_rx_desc_resources(priv, queue);
+}
+
 /**
- * free_dma_tx_desc_resources - free TX dma desc resources
+ * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
  * @priv: private structure
+ * @queue: TX queue index
  */
-static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
 {
-	u32 tx_count = priv->plat->tx_queues_to_use;
-	u32 queue;
-
-	/* Free TX queue resources */
-	for (queue = 0; queue < tx_count; queue++) {
 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
 	size_t size;
 	void *addr;
@@ -1861,27 +1832,31 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
 	kfree(tx_q->tx_skbuff_dma);
 	kfree(tx_q->tx_skbuff);
-	}
 }
 
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 tx_count = priv->plat->tx_queues_to_use;
+	u32 queue;
+
+	/* Free TX queue resources */
+	for (queue = 0; queue < tx_count; queue++)
+		__free_dma_tx_desc_resources(priv, queue);
+}
+
 /**
- * alloc_dma_rx_desc_resources - alloc RX resources.
+ * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
  * @priv: private structure
+ * @queue: RX queue index
  * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
-static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
 {
-	bool xdp_prog = stmmac_xdp_is_enabled(priv);
-	u32 rx_count = priv->plat->rx_queues_to_use;
-	int ret = -ENOMEM;
-	u32 queue;
-
-	/* RX queues buffers and DMA */
-	for (queue = 0; queue < rx_count; queue++) {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 	struct stmmac_channel *ch = &priv->channel[queue];
+	bool xdp_prog = stmmac_xdp_is_enabled(priv);
 	struct page_pool_params pp_params = { 0 };
 	unsigned int num_pages;
+	int ret;
@@ -1903,14 +1878,14 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 	if (IS_ERR(rx_q->page_pool)) {
 		ret = PTR_ERR(rx_q->page_pool);
 		rx_q->page_pool = NULL;
-			goto err_dma;
+		return ret;
 	}
 
 	rx_q->buf_pool = kcalloc(priv->dma_rx_size,
 				 sizeof(*rx_q->buf_pool),
 				 GFP_KERNEL);
 	if (!rx_q->buf_pool)
-			goto err_dma;
+		return -ENOMEM;
 
 	if (priv->extend_desc) {
 		rx_q->dma_erx = dma_alloc_coherent(priv->device,
@@ -1919,7 +1894,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 						   &rx_q->dma_rx_phy,
 						   GFP_KERNEL);
 		if (!rx_q->dma_erx)
-				goto err_dma;
+			return -ENOMEM;
 
 	} else {
 		rx_q->dma_rx = dma_alloc_coherent(priv->device,
@@ -1928,7 +1903,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 						  &rx_q->dma_rx_phy,
 						  GFP_KERNEL);
 		if (!rx_q->dma_rx)
-				goto err_dma;
+			return -ENOMEM;
 	}
 
 	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
@@ -1936,8 +1911,23 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 			       ch->rx_napi.napi_id);
 	if (ret) {
 		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
-			goto err_dma;
+		return -EINVAL;
 	}
-	}
 
+	return 0;
+}
+
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+	int ret;
+
+	/* RX queues buffers and DMA */
+	for (queue = 0; queue < rx_count; queue++) {
+		ret = __alloc_dma_rx_desc_resources(priv, queue);
+		if (ret)
+			goto err_dma;
+	}
+
 	return 0;
@@ -1949,21 +1939,16 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 }
 
 /**
- * alloc_dma_tx_desc_resources - alloc TX resources.
+ * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
  * @priv: private structure
+ * @queue: TX queue index
  * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
-static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
 {
-	u32 tx_count = priv->plat->tx_queues_to_use;
-	int ret = -ENOMEM;
-	u32 queue;
-
-	/* TX queues buffers and DMA */
-	for (queue = 0; queue < tx_count; queue++) {
 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
 	size_t size;
 	void *addr;
@@ -1975,13 +1960,13 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
 				      sizeof(*tx_q->tx_skbuff_dma),
 				      GFP_KERNEL);
 	if (!tx_q->tx_skbuff_dma)
-			goto err_dma;
+		return -ENOMEM;
 
 	tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
 				  sizeof(struct sk_buff *),
 				  GFP_KERNEL);
 	if (!tx_q->tx_skbuff)
-			goto err_dma;
+		return -ENOMEM;
 
 	if (priv->extend_desc)
 		size = sizeof(struct dma_extended_desc);
@@ -1995,7 +1980,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
 	addr = dma_alloc_coherent(priv->device, size,
 				  &tx_q->dma_tx_phy, GFP_KERNEL);
 	if (!addr)
-			goto err_dma;
+		return -ENOMEM;
 
 	if (priv->extend_desc)
 		tx_q->dma_etx = addr;
@@ -2003,6 +1988,21 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
 		tx_q->dma_entx = addr;
 	else
 		tx_q->dma_tx = addr;
-	}
 
+	return 0;
+}
+
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 tx_count = priv->plat->tx_queues_to_use;
+	u32 queue;
+	int ret;
+
+	/* TX queues buffers and DMA */
+	for (queue = 0; queue < tx_count; queue++) {
+		ret = __alloc_dma_tx_desc_resources(priv, queue);
+		if (ret)
+			goto err_dma;
+	}
+
 	return 0;