Commit a0f7add6 authored by Kirill A. Shutemov, committed by Liu Shixin
Browse files

mm, treewide: introduce NR_PAGE_ORDERS

stable inclusion
from stable-v6.6.30
commit ded1ffea52132e58eaaa7d4ea39477f911796a40
category: cleanup
bugzilla: https://gitee.com/openeuler/kernel/issues/I9NYY7
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=ded1ffea52132e58eaaa7d4ea39477f911796a40

--------------------------------

[ Upstream commit fd37721803c6e73619108f76ad2e12a9aa5fafaf ]

NR_PAGE_ORDERS defines the number of page orders supported by the page
allocator, ranging from 0 to MAX_ORDER — MAX_ORDER + 1 orders in total.

NR_PAGE_ORDERS assists in defining arrays of page orders and allows for
more natural iteration over them.

[kirill.shutemov@linux.intel.com: fixup for kerneldoc warning]
  Link: https://lkml.kernel.org/r/20240101111512.7empzyifq7kxtzk3@box
Link: https://lkml.kernel.org/r/20231228144704.14033-1-kirill.shutemov@linux.intel.com


Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Stable-dep-of: b6976f323a86 ("drm/ttm: stop pooling cached NUMA pages v2")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Conflicts:
	mm/compaction.c
	kernel/crash_core.c
	mm/internal.h
[ Context conflict with commit c9b39e3f in kernel/crash_core.c.
  Context conflict with commit af879363 in mm/compaction.c. Remove
  previous define of NR_PAGE_ORDERS in mm/internal.h. ]
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
parent 8c0dfc8b
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -172,7 +172,7 @@ variables.
Offset of the free_list's member. This value is used to compute the number
of free pages.

Each zone has a free_area structure array called free_area[MAX_ORDER + 1].
Each zone has a free_area structure array called free_area[NR_PAGE_ORDERS].
The free_list represents a linked list of free page blocks.

(list_head, next|prev)
@@ -189,8 +189,8 @@ Offsets of the vmap_area's members. They carry vmalloc-specific
information. Makedumpfile gets the start address of the vmalloc region
from this.

(zone.free_area, MAX_ORDER + 1)
-------------------------------
(zone.free_area, NR_PAGE_ORDERS)
--------------------------------

Free areas descriptor. User-space tools use this value to iterate the
free_area ranges. MAX_ORDER is used by the zone buddy allocator.
+1 −1
Original line number Diff line number Diff line
@@ -16,7 +16,7 @@ struct hyp_pool {
	 * API at EL2.
	 */
	hyp_spinlock_t lock;
	struct list_head free_area[MAX_ORDER + 1];
	struct list_head free_area[NR_PAGE_ORDERS];
	phys_addr_t range_start;
	phys_addr_t range_end;
	unsigned short max_order;
+1 −1
Original line number Diff line number Diff line
@@ -897,7 +897,7 @@ void __init cheetah_ecache_flush_init(void)

	/* Now allocate error trap reporting scoreboard. */
	sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
	for (order = 0; order <= MAX_ORDER; order++) {
	for (order = 0; order < NR_PAGE_ORDERS; order++) {
		if ((PAGE_SIZE << order) >= sz)
			break;
	}
+1 −1
Original line number Diff line number Diff line
@@ -175,7 +175,7 @@ static void ttm_device_init_pools(struct kunit *test)

	if (params->pools_init_expected) {
		for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
			for (int j = 0; j <= MAX_ORDER; ++j) {
			for (int j = 0; j < NR_PAGE_ORDERS; ++j) {
				pt = pool->caching[i].orders[j];
				KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
				KUNIT_EXPECT_EQ(test, pt.caching, i);
+10 −10
Original line number Diff line number Diff line
@@ -65,11 +65,11 @@ module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
static struct ttm_pool_type global_uncached[MAX_ORDER + 1];
static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];

static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
@@ -565,7 +565,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,

	if (use_dma_alloc || nid != NUMA_NO_NODE) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j <= MAX_ORDER; ++j)
			for (j = 0; j < NR_PAGE_ORDERS; ++j)
				ttm_pool_type_init(&pool->caching[i].orders[j],
						   pool, i, j);
	}
@@ -586,7 +586,7 @@ void ttm_pool_fini(struct ttm_pool *pool)

	if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j <= MAX_ORDER; ++j)
			for (j = 0; j < NR_PAGE_ORDERS; ++j)
				ttm_pool_type_fini(&pool->caching[i].orders[j]);
	}

@@ -641,7 +641,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i <= MAX_ORDER; ++i)
	for (i = 0; i < NR_PAGE_ORDERS; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}
@@ -652,7 +652,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
{
	unsigned int i;

	for (i = 0; i <= MAX_ORDER; ++i)
	for (i = 0; i < NR_PAGE_ORDERS; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}
@@ -761,7 +761,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
	spin_lock_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i <= MAX_ORDER; ++i) {
	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@@ -794,7 +794,7 @@ void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i <= MAX_ORDER; ++i) {
	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

Loading