Commit 05656132 authored by Alexander Lobakin, committed by Jakub Kicinski
Browse files

net: page_pool: simplify page recycling condition tests



pool_page_reusable() is a leftover from pre-NUMA-aware times. For now,
this function is just a redundant wrapper over page_is_pfmemalloc(),
so inline it into its sole call site.

Signed-off-by: Alexander Lobakin <alobakin@pm.me>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Reviewed-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent a79afa78
Loading
Loading
Loading
Loading
+4 −10
Original line number Diff line number Diff line
@@ -350,14 +350,6 @@ static bool page_pool_recycle_in_cache(struct page *page,
	return true;
}

/* page is NOT reusable when:
 * 1) allocated when system is under some pressure. (page_is_pfmemalloc)
 *
 * NOTE: @pool is unused here — the NUMA-node check it once supported is
 * gone, leaving this as a plain wrapper over page_is_pfmemalloc(); this
 * patch removes the wrapper and inlines the check at its sole call site.
 */
static bool pool_page_reusable(struct page_pool *pool, struct page *page)
{
	return !page_is_pfmemalloc(page);
}

/* If the page refcnt == 1, this will try to recycle the page.
 * if PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->max_len).
@@ -373,9 +365,11 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns page, and can recycle it.
	 *
	 * page is NOT reusable when allocated when system is under
	 * some pressure. (page_is_pfmemalloc)
	 */
	if (likely(page_ref_count(page) == 1 &&
		   pool_page_reusable(pool, page))) {
	if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)