Commit ad6fa1e1 authored by Joe Damato, committed by David S. Miller

page_pool: Add recycle stats



Add per-cpu stats tracking page pool recycling events:
	- cached: recycling placed the page in the page pool cache
	- cache_full: the page pool cache was full
	- ring: the page was placed into the ptr ring
	- ring_full: the page was released from the page pool because the ptr ring was full
	- released_refcnt: the page was released (and not recycled) because refcnt > 1
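
Because the counters are per-cpu, a consumer has to fold them across
CPUs before reporting. A minimal sketch of such a helper (hypothetical;
not part of this patch) could look like:

static void page_pool_sum_recycle_stats(const struct page_pool *pool,
					struct page_pool_recycle_stats *total)
{
	int cpu;

	memset(total, 0, sizeof(*total));
	for_each_possible_cpu(cpu) {
		const struct page_pool_recycle_stats *s =
			per_cpu_ptr(pool->recycle_stats, cpu);

		total->cached += s->cached;
		total->cache_full += s->cache_full;
		total->ring += s->ring;
		total->ring_full += s->ring_full;
		total->released_refcnt += s->released_refcnt;
	}
}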

Signed-off-by: Joe Damato <jdamato@fastly.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8610037e
include/net/page_pool.h +16 −0
@@ -95,6 +95,18 @@ struct page_pool_alloc_stats {
 	u64 refill; /* allocations via successful refill */
 	u64 waive;  /* failed refills due to numa zone mismatch */
 };
+
+struct page_pool_recycle_stats {
+	u64 cached;	/* recycling placed page in the cache. */
+	u64 cache_full; /* cache was full */
+	u64 ring;	/* recycling placed page back into ptr ring */
+	u64 ring_full;	/* page was released from page-pool because
+			 * PTR ring was full.
+			 */
+	u64 released_refcnt; /* page released because of elevated
+			      * refcnt
+			      */
+};
 #endif
 
 struct page_pool {
@@ -144,6 +156,10 @@ struct page_pool {
 	 */
 	struct ptr_ring ring;
 
+#ifdef CONFIG_PAGE_POOL_STATS
+	/* recycle stats are per-cpu to avoid locking */
+	struct page_pool_recycle_stats __percpu *recycle_stats;
+#endif
 	atomic_t pages_state_release_cnt;
 
 	/* A page_pool is strictly tied to a single RX-queue being
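
Since recycle_stats is a __percpu pointer, its lifetime follows the
usual alloc_percpu()/free_percpu() pattern that the page_pool.c changes
below adopt. A self-contained sketch of that lifecycle, with made-up
demo_* names:

#include <linux/percpu.h>

static struct page_pool_recycle_stats __percpu *demo_stats;

static int demo_init(void)
{
	/* alloc_percpu() hands back one zeroed copy per possible CPU */
	demo_stats = alloc_percpu(struct page_pool_recycle_stats);
	if (!demo_stats)
		return -ENOMEM;
	return 0;
}

static void demo_count_one_recycle(void)
{
	/* each CPU bumps only its own copy, so no lock is needed */
	this_cpu_inc(demo_stats->ring);
}

static void demo_exit(void)
{
	free_percpu(demo_stats);
}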
net/core/page_pool.c +28 −2
@@ -29,8 +29,15 @@
 #ifdef CONFIG_PAGE_POOL_STATS
 /* alloc_stat_inc is intended to be used in softirq context */
 #define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
+/* recycle_stat_inc is safe to use when preemption is possible. */
+#define recycle_stat_inc(pool, __stat)							\
+	do {										\
+		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats;	\
+		this_cpu_inc(s->__stat);						\
+	} while (0)
 #else
 #define alloc_stat_inc(pool, __stat)
+#define recycle_stat_inc(pool, __stat)
 #endif
 
 static int page_pool_init(struct page_pool *pool,
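
alloc_stat_inc() can stay a plain increment because the allocation path
runs in softirq context on one CPU, while recycle_stat_inc() may run
with preemption enabled. An illustrative sketch (this function is made
up) of the migration race that this_cpu_inc() closes:

static void racy_recycle_stat_inc(struct page_pool *pool)
{
	struct page_pool_recycle_stats *s;

	s = this_cpu_ptr(pool->recycle_stats);	/* this CPU's copy */
	/* If the task is preempted and migrated to another CPU here,
	 * 's' still points at the old CPU's copy, and the non-atomic
	 * read-modify-write below races with that CPU's own updates.
	 * this_cpu_inc() instead performs the whole increment as one
	 * preemption-safe per-cpu operation.
	 */
	s->cached++;
}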
@@ -80,6 +87,12 @@ static int page_pool_init(struct page_pool *pool,
 	    pool->p.flags & PP_FLAG_PAGE_FRAG)
 		return -EINVAL;
 
+#ifdef CONFIG_PAGE_POOL_STATS
+	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
+	if (!pool->recycle_stats)
+		return -ENOMEM;
+#endif
+
 	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
 		return -ENOMEM;
 
@@ -410,7 +423,12 @@ static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
 	else
 		ret = ptr_ring_produce_bh(&pool->ring, page);
 
-	return (ret == 0) ? true : false;
+	if (!ret) {
+		recycle_stat_inc(pool, ring);
+		return true;
+	}
+
+	return false;
 }
 
 /* Only allow direct recycling in special circumstances, into the
@@ -421,11 +439,14 @@ static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
 static bool page_pool_recycle_in_cache(struct page *page,
 				       struct page_pool *pool)
 {
-	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
+	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
+		recycle_stat_inc(pool, cache_full);
 		return false;
+	}
 
 	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
 	pool->alloc.cache[pool->alloc.count++] = page;
+	recycle_stat_inc(pool, cached);
 	return true;
 }
 
@@ -475,6 +496,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
 	 * doing refcnt based recycle tricks, meaning another process
 	 * will be invoking put_page.
 	 */
+	recycle_stat_inc(pool, released_refcnt);
 	/* Do not replace this with page_pool_return_page() */
 	page_pool_release_page(pool, page);
 	put_page(page);
@@ -488,6 +510,7 @@ void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
 	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
 	if (page && !page_pool_recycle_in_ring(pool, page)) {
 		/* Cache full, fallback to free pages */
+		recycle_stat_inc(pool, ring_full);
 		page_pool_return_page(pool, page);
 	}
 }
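
Taken together, the counters map onto the put path as in this condensed
sketch (simplified from __page_pool_put_page() and its callers; the
real code also handles DMA sync, fragment refcounts, and only allows
direct recycling from softirq context):

static void put_page_sketch(struct page_pool *pool, struct page *page,
			    bool allow_direct)
{
	if (page_ref_count(page) == 1) {
		/* 'cached' on success; 'cache_full' is bumped inside
		 * page_pool_recycle_in_cache() when the cache overflows */
		if (allow_direct && page_pool_recycle_in_cache(page, pool))
			return;
		/* 'ring' is bumped inside on success */
		if (page_pool_recycle_in_ring(pool, page))
			return;
		recycle_stat_inc(pool, ring_full);
		page_pool_return_page(pool, page);
		return;
	}
	/* refcnt > 1: another user still holds the page */
	recycle_stat_inc(pool, released_refcnt);
	page_pool_release_page(pool, page);
	put_page(page);
}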
@@ -636,6 +659,9 @@ static void page_pool_free(struct page_pool *pool)
 	if (pool->p.flags & PP_FLAG_DMA_MAP)
 		put_device(pool->p.dev);
 
+#ifdef CONFIG_PAGE_POOL_STATS
+	free_percpu(pool->recycle_stats);
+#endif
 	kfree(pool);
 }