Commit ced5cc6a authored by Jian Shen's avatar Jian Shen Committed by Jiantao Xiao
Browse files

net: page_pool: fix kabi issue for page pool statistics

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8BNYE


CVE: NA

----------------------------------------------------------------------

Currently the page pool statistics members are placed in the
middle part of struct page_pool, which may break the KABI. To
fix it, replace the KABI_RESERVE(1) field with a pointer that
refers to the page pool statistics.

Signed-off-by: default avatarJian Shen <shenjian15@huawei.com>
parent 243babb8
Loading
Loading
Loading
Loading
+14 −9
Original line number Diff line number Diff line
@@ -116,6 +116,16 @@ struct page_pool_stats {
	struct page_pool_recycle_stats recycle_stats;
};

/* To preserve KABI compatibility, the alloc_stats and recycle_stats
 * members were moved out of the middle of struct page_pool into this
 * separately allocated structure; struct page_pool holds a pointer to
 * it in its KABI_USE(1) slot instead.
 */
struct page_pool_raw_stats {
	/* these stats are incremented while in softirq context */
	struct page_pool_alloc_stats alloc_stats;
	/* recycle stats are per-cpu to avoid locking */
	struct page_pool_recycle_stats __percpu *recycle_stats;
};

int page_pool_ethtool_stats_get_count(void);
u8 *page_pool_ethtool_stats_get_strings(u8 *data);
u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
@@ -159,11 +169,6 @@ struct page_pool {
	struct page *frag_page;
	long frag_users;

#ifdef CONFIG_PAGE_POOL_STATS
	/* these stats are incremented while in softirq context */
	struct page_pool_alloc_stats alloc_stats;
#endif

	/*
	 * Data structure for allocation side
	 *
@@ -192,10 +197,6 @@ struct page_pool {
	 */
	struct ptr_ring ring;

#ifdef CONFIG_PAGE_POOL_STATS
	/* recycle stats are per-cpu to avoid locking */
	struct page_pool_recycle_stats __percpu *recycle_stats;
#endif
	atomic_t pages_state_release_cnt;

	/* A page_pool is strictly tied to a single RX-queue being
@@ -206,7 +207,11 @@ struct page_pool {

	u64 destroy_cnt;

#ifdef CONFIG_PAGE_POOL_STATS
	KABI_USE(1, struct page_pool_raw_stats *stats)
#else
	KABI_RESERVE(1)
#endif
};

struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
+32 −18
Original line number Diff line number Diff line
@@ -18,7 +18,6 @@
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */
#include <linux/poison.h>
#include <linux/ethtool.h>

#include <trace/events/page_pool.h>

@@ -29,21 +28,25 @@

#ifdef CONFIG_PAGE_POOL_STATS
/* alloc_stat_inc is intended to be used in softirq context */
#define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
#define alloc_stat_inc(pool, __stat)	(pool->stats->alloc_stats.__stat++)
/* recycle_stat_inc is safe to use when preemption is possible. */
#define recycle_stat_inc(pool, __stat)							\
	do {										\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats;	\
		struct page_pool_recycle_stats __percpu *s = pool->stats->recycle_stats;\
		this_cpu_inc(s->__stat);						\
	} while (0)

#define recycle_stat_add(pool, __stat, val)						\
	do {										\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats;	\
		struct page_pool_recycle_stats __percpu *s = pool->stats->recycle_stats;\
		this_cpu_add(s->__stat, val);						\
	} while (0)

static const char pp_stats[][ETH_GSTRING_LEN] = {
/* workaround for macro ETH_GSTRING_LEN, for include the header file ethtool.h
 * will cause KABI issue, so define a new one to replace it.
 */
#define PP_ETH_GSTRING_LEN 32
static const char pp_stats[][PP_ETH_GSTRING_LEN] = {
	"rx_pp_alloc_fast",
	"rx_pp_alloc_slow",
	"rx_pp_alloc_slow_ho",
@@ -66,16 +69,16 @@ bool page_pool_get_stats(struct page_pool *pool,
		return false;

	/* The caller is responsible to initialize stats. */
	stats->alloc_stats.fast += pool->alloc_stats.fast;
	stats->alloc_stats.slow += pool->alloc_stats.slow;
	stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
	stats->alloc_stats.empty += pool->alloc_stats.empty;
	stats->alloc_stats.refill += pool->alloc_stats.refill;
	stats->alloc_stats.waive += pool->alloc_stats.waive;
	stats->alloc_stats.fast += pool->stats->alloc_stats.fast;
	stats->alloc_stats.slow += pool->stats->alloc_stats.slow;
	stats->alloc_stats.slow_high_order += pool->stats->alloc_stats.slow_high_order;
	stats->alloc_stats.empty += pool->stats->alloc_stats.empty;
	stats->alloc_stats.refill += pool->stats->alloc_stats.refill;
	stats->alloc_stats.waive += pool->stats->alloc_stats.waive;

	for_each_possible_cpu(cpu) {
		const struct page_pool_recycle_stats *pcpu =
			per_cpu_ptr(pool->recycle_stats, cpu);
			per_cpu_ptr(pool->stats->recycle_stats, cpu);

		stats->recycle_stats.cached += pcpu->cached;
		stats->recycle_stats.cache_full += pcpu->cache_full;
@@ -93,8 +96,8 @@ u8 *page_pool_ethtool_stats_get_strings(u8 *data)
	int i;

	for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
		memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
		memcpy(data, pp_stats[i], PP_ETH_GSTRING_LEN);
		data += PP_ETH_GSTRING_LEN;
	}

	return data;
@@ -183,13 +186,16 @@ static int page_pool_init(struct page_pool *pool,
	}

#ifdef CONFIG_PAGE_POOL_STATS
	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
	if (!pool->recycle_stats)
	pool->stats = kzalloc_node(sizeof(*pool->stats), GFP_KERNEL, params->nid);
	if (!pool->stats)
		return -ENOMEM;
	pool->stats->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
	if (!pool->stats->recycle_stats)
		goto out;
#endif

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
		return -ENOMEM;
		goto out;

	atomic_set(&pool->pages_state_release_cnt, 0);

@@ -200,6 +206,13 @@ static int page_pool_init(struct page_pool *pool,
		get_device(pool->p.dev);

	return 0;
out:
#ifdef CONFIG_PAGE_POOL_STATS
	free_percpu(pool->stats->recycle_stats);
	kfree(pool->stats);
	pool->stats = NULL;
#endif
	return -ENOMEM;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
@@ -767,7 +780,8 @@ static void page_pool_free(struct page_pool *pool)
		put_device(pool->p.dev);

#ifdef CONFIG_PAGE_POOL_STATS
	free_percpu(pool->recycle_stats);
	free_percpu(pool->stats->recycle_stats);
	kfree(pool->stats);
#endif
	kfree(pool);
}