Unverified Commit dcde7354 authored by openeuler-ci-bot, committed by Gitee

!7078 v2 Backport four conflicting stable patches

Merge Pull Request from: @ci-robot 
 
PR sync from: Liu Shixin <liushixin2@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/MBGLKG6VDA4KFC7AR4TEMEUYMD465GIB/ 
Backport four stable patches which have conflicts that need to be fixed (a brief sketch of the NR_PAGE_ORDERS rename follows the patch list below).

Kirill A. Shutemov (1):
  mm, treewide: introduce NR_PAGE_ORDERS

Matthew Wilcox (Oracle) (1):
  mm: turn folio_test_hugetlb into a PageType

Miaohe Lin (1):
  fork: defer linking file vma until vma is fully initialized

Peter Xu (1):
  mm/hugetlb: fix missing hugetlb_lock for resv uncharge


-- 
2.25.1
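
For reference, the first patch in the series renames the buddy-allocator array length. Below is a minimal compile-able sketch of the idea, assuming the upstream definition of NR_PAGE_ORDERS from include/linux/mmzone.h; the MAX_ORDER value here is only an example, as it is architecture- and config-dependent in the kernel:

/*
 * NR_PAGE_ORDERS names the length of the per-zone free_area array
 * directly, so loops use the conventional `i < N` bound instead of
 * the off-by-one-prone `i <= MAX_ORDER` seen in the hunks below.
 */
#include <stdio.h>

#define MAX_ORDER	10			/* example value only */
#define NR_PAGE_ORDERS	(MAX_ORDER + 1)		/* upstream definition */

static unsigned long nr_free[NR_PAGE_ORDERS];	/* stand-in for zone.free_area */

int main(void)
{
	/* old style was: for (order = 0; order <= MAX_ORDER; order++) */
	for (int order = 0; order < NR_PAGE_ORDERS; order++)
		printf("order %2d: %lu free blocks\n", order, nr_free[order]);
	return 0;
}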
 
https://gitee.com/openeuler/kernel/issues/I9NYY7 
 
Link: https://gitee.com/openeuler/kernel/pulls/7078

 

Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Kevin Zhu <zhukeqian1@huawei.com>
Signed-off-by: Xie XiuQi <xiexiuqi@huawei.com>
parents 5a6694b0 738fe30d
+3 −3
@@ -172,7 +172,7 @@ variables.
 Offset of the free_list's member. This value is used to compute the number
 of free pages.
 
-Each zone has a free_area structure array called free_area[MAX_ORDER + 1].
+Each zone has a free_area structure array called free_area[NR_PAGE_ORDERS].
 The free_list represents a linked list of free page blocks.
 
 (list_head, next|prev)
@@ -189,8 +189,8 @@ Offsets of the vmap_area's members. They carry vmalloc-specific
 information. Makedumpfile gets the start address of the vmalloc region
 from this.
 
-(zone.free_area, MAX_ORDER + 1)
--------------------------------
+(zone.free_area, NR_PAGE_ORDERS)
+--------------------------------
 
 Free areas descriptor. User-space tools use this value to iterate the
 free_area ranges. MAX_ORDER is used by the zone buddy allocator.
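
The documentation text above says user-space tools read this exported length to iterate the free_area ranges. As a hedged illustration (not makedumpfile's actual code; it assumes the VMCOREINFO_LENGTH() convention of emitting lines of the form LENGTH(zone.free_area)=<n>), a tool could fetch the value like this:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Scan vmcoreinfo text for "LENGTH(zone.free_area)=<n>" and return <n>. */
static long free_area_length(FILE *f)
{
	char line[256];

	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "LENGTH(zone.free_area)=", 23))
			return strtol(line + 23, NULL, 10);
	return -1;	/* entry not present */
}

int main(void)
{
	FILE *f = fopen("/proc/vmcoreinfo", "r");	/* present when the kernel exports vmcoreinfo */
	if (!f)
		return 1;
	printf("zone.free_area length: %ld\n", free_area_length(f));
	fclose(f);
	return 0;
}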
+1 −1
@@ -16,7 +16,7 @@ struct hyp_pool {
 	 * API at EL2.
 	 */
 	hyp_spinlock_t lock;
-	struct list_head free_area[MAX_ORDER + 1];
+	struct list_head free_area[NR_PAGE_ORDERS];
 	phys_addr_t range_start;
 	phys_addr_t range_end;
 	unsigned short max_order;
+1 −1
@@ -897,7 +897,7 @@ void __init cheetah_ecache_flush_init(void)
 
 	/* Now allocate error trap reporting scoreboard. */
 	sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
-	for (order = 0; order <= MAX_ORDER; order++) {
+	for (order = 0; order < NR_PAGE_ORDERS; order++) {
 		if ((PAGE_SIZE << order) >= sz)
 			break;
 	}
+1 −1
@@ -175,7 +175,7 @@ static void ttm_device_init_pools(struct kunit *test)
 
 	if (params->pools_init_expected) {
 		for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
-			for (int j = 0; j <= MAX_ORDER; ++j) {
+			for (int j = 0; j < NR_PAGE_ORDERS; ++j) {
 				pt = pool->caching[i].orders[j];
 				KUNIT_EXPECT_PTR_EQ(test, pt.pool, pool);
 				KUNIT_EXPECT_EQ(test, pt.caching, i);
+10 −10
@@ -65,11 +65,11 @@ module_param(page_pool_size, ulong, 0644);
 
 static atomic_long_t allocated_pages;
 
-static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
-static struct ttm_pool_type global_uncached[MAX_ORDER + 1];
+static struct ttm_pool_type global_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_uncached[NR_PAGE_ORDERS];
 
-static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
-static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
+static struct ttm_pool_type global_dma32_write_combined[NR_PAGE_ORDERS];
+static struct ttm_pool_type global_dma32_uncached[NR_PAGE_ORDERS];
 
 static spinlock_t shrinker_lock;
 static struct list_head shrinker_list;
@@ -565,7 +565,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
 
 	if (use_dma_alloc || nid != NUMA_NO_NODE) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-			for (j = 0; j <= MAX_ORDER; ++j)
+			for (j = 0; j < NR_PAGE_ORDERS; ++j)
 				ttm_pool_type_init(&pool->caching[i].orders[j],
 						   pool, i, j);
 	}
@@ -586,7 +586,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
 
 	if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-			for (j = 0; j <= MAX_ORDER; ++j)
+			for (j = 0; j < NR_PAGE_ORDERS; ++j)
 				ttm_pool_type_fini(&pool->caching[i].orders[j]);
 	}
 
@@ -641,7 +641,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
 	unsigned int i;
 
 	seq_puts(m, "\t ");
-	for (i = 0; i <= MAX_ORDER; ++i)
+	for (i = 0; i < NR_PAGE_ORDERS; ++i)
 		seq_printf(m, " ---%2u---", i);
 	seq_puts(m, "\n");
 }
@@ -652,7 +652,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
 {
 	unsigned int i;
 
-	for (i = 0; i <= MAX_ORDER; ++i)
+	for (i = 0; i < NR_PAGE_ORDERS; ++i)
 		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
 	seq_puts(m, "\n");
 }
@@ -761,7 +761,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
 	spin_lock_init(&shrinker_lock);
 	INIT_LIST_HEAD(&shrinker_list);
 
-	for (i = 0; i <= MAX_ORDER; ++i) {
+	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
 		ttm_pool_type_init(&global_write_combined[i], NULL,
 				   ttm_write_combined, i);
 		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@@ -794,7 +794,7 @@ void ttm_pool_mgr_fini(void)
 {
 	unsigned int i;
 
-	for (i = 0; i <= MAX_ORDER; ++i) {
+	for (i = 0; i < NR_PAGE_ORDERS; ++i) {
 		ttm_pool_type_fini(&global_write_combined[i]);
 		ttm_pool_type_fini(&global_uncached[i]);
 