Unverified Commit cd9687f0 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!3018 folio conversions for numa balance

Merge Pull Request from: @ci-robot 
 
PR sync from: Peng Zhang <zhangpeng362@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/674RGQ3Q3HBEOKDGLEUVNYCOW4WCEXPD/ 
From: ZhangPeng <zhangpeng362@huawei.com>

Includes 3 patch series — "mm: migrate: more folio conversion and
unification", "mm: convert numa balancing functions to use a folio" and
"mm: convert page cpupid functions to folios" — to implement the folio
conversions for NUMA balancing.

Kefeng Wang (33):
  mm: migrate: remove PageTransHuge check in numamigrate_isolate_page()
  mm: migrate: remove THP mapcount check in numamigrate_isolate_page()
  mm: migrate: convert numamigrate_isolate_page() to
    numamigrate_isolate_folio()
  mm: migrate: convert migrate_misplaced_page() to
    migrate_misplaced_folio()
  mm: migrate: use __folio_test_movable()
  mm: migrate: use a folio in add_page_for_migration()
  mm: migrate: remove PageHead() check for HugeTLB in
    add_page_for_migration()
  mm: migrate: remove isolated variable in add_page_for_migration()
  mm: memory: add vm_normal_folio_pmd()
  mm: huge_memory: use a folio in do_huge_pmd_numa_page()
  mm: memory: use a folio in do_numa_page()
  mm: memory: make numa_migrate_prep() to take a folio
  mm: mempolicy: make mpol_misplaced() to take a folio
  sched/numa, mm: make numa migrate functions to take a folio
  mm_types: add virtual and _last_cpupid into struct folio
  mm: add folio_last_cpupid()
  mm: memory: use folio_last_cpupid() in do_numa_page()
  mm: huge_memory: use folio_last_cpupid() in do_huge_pmd_numa_page()
  mm: huge_memory: use folio_last_cpupid() in __split_huge_page_tail()
  mm: remove page_cpupid_last()
  mm: add folio_xchg_access_time()
  sched/fair: use folio_xchg_access_time() in numa_hint_fault_latency()
  mm: mprotect: use a folio in change_pte_range()
  mm: huge_memory: use a folio in change_huge_pmd()
  mm: remove xchg_page_access_time()
  mm: add folio_xchg_last_cpupid()
  sched/fair: use folio_xchg_last_cpupid() in
    should_numa_migrate_memory()
  mm: migrate: use folio_xchg_last_cpupid() in folio_migrate_flags()
  mm: huge_memory: use folio_xchg_last_cpupid() in
    __split_huge_page_tail()
  mm: make finish_mkwrite_fault() static
  mm: convert wp_page_reuse() and finish_mkwrite_fault() to take a folio
  mm: use folio_xchg_last_cpupid() in wp_page_reuse()
  mm: remove page_cpupid_xchg_last()


-- 
2.25.1
 
https://gitee.com/openeuler/kernel/issues/I8JQWQ 
 
Link: https://gitee.com/openeuler/kernel/pulls/3018

 

Reviewed-by: default avatarKefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: default avatarZucheng Zheng <zhengzucheng@huawei.com>
Reviewed-by: default avatarXie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parents 13398d9e 8760e27b
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -174,7 +174,7 @@ extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
/* Check if a vma is migratable */
extern bool vma_migratable(struct vm_area_struct *vma);

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
int mpol_misplaced(struct folio *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

static inline bool mpol_is_preferred_many(struct mempolicy *pol)
@@ -278,7 +278,8 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
static inline int mpol_misplaced(struct folio *folio,
				 struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
+2 −2
Original line number Diff line number Diff line
@@ -142,10 +142,10 @@ const struct movable_operations *page_movable_ops(struct page *page)
}

#ifdef CONFIG_NUMA_BALANCING
int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
			   int node);
#else
static inline int migrate_misplaced_page(struct page *page,
static inline int migrate_misplaced_folio(struct folio *folio,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
+17 −15
Original line number Diff line number Diff line
@@ -1337,7 +1337,6 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
		struct page *page, unsigned int nr, unsigned long addr);

vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif

/*
@@ -1686,26 +1685,26 @@ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
	return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
static inline int folio_last_cpupid(struct folio *folio)
{
	return page->_last_cpupid;
	return folio->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
static inline int folio_last_cpupid(struct folio *folio)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
	return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);
int folio_xchg_last_cpupid(struct folio *folio, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
@@ -1713,11 +1712,12 @@ static inline void page_cpupid_reset_last(struct page *page)
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */

static inline int xchg_page_access_time(struct page *page, int time)
static inline int folio_xchg_access_time(struct folio *folio, int time)
{
	int last_time;

	last_time = page_cpupid_xchg_last(page, time >> PAGE_ACCESS_TIME_BUCKETS);
	last_time = folio_xchg_last_cpupid(folio,
					   time >> PAGE_ACCESS_TIME_BUCKETS);
	return last_time << PAGE_ACCESS_TIME_BUCKETS;
}

@@ -1731,19 +1731,19 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
	}
}
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
	return page_to_nid(page); /* XXX */
	return folio_nid(folio); /* XXX */
}

static inline int xchg_page_access_time(struct page *page, int time)
static inline int folio_xchg_access_time(struct folio *folio, int time)
{
	return 0;
}

static inline int page_cpupid_last(struct page *page)
static inline int folio_last_cpupid(struct folio *folio)
{
	return page_to_nid(page); /* XXX */
	return folio_nid(folio); /* XXX */
}

static inline int cpupid_to_nid(int cpupid)
@@ -2327,6 +2327,8 @@ struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte);
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			     pte_t pte);
struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
				  unsigned long addr, pmd_t pmd);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t pmd);

+18 −4
Original line number Diff line number Diff line
@@ -199,6 +199,10 @@ struct page {
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif

#ifdef CONFIG_KMSAN
	/*
	 * KMSAN metadata for this page:
@@ -210,10 +214,6 @@ struct page {
	struct page *kmsan_shadow;
	struct page *kmsan_origin;
#endif

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
} _struct_page_alignment;

/*
@@ -272,6 +272,8 @@ typedef struct {
 * @_refcount: Do not access this member directly.  Use folio_ref_count()
 *    to find how many references there are to this folio.
 * @memcg_data: Memory Control Group data.
 * @virtual: Virtual address in the kernel direct map.
 * @_last_cpupid: IDs of last CPU and last process that accessed the folio.
 * @_entire_mapcount: Do not use directly, call folio_entire_mapcount().
 * @_nr_pages_mapped: Do not use directly, call folio_mapcount().
 * @_pincount: Do not use directly, call folio_maybe_dma_pinned().
@@ -317,6 +319,12 @@ struct folio {
			atomic_t _refcount;
#ifdef CONFIG_MEMCG
			unsigned long memcg_data;
#endif
#if defined(WANT_PAGE_VIRTUAL)
			void *virtual;
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
			int _last_cpupid;
#endif
	/* private: the union with struct page is transitional */
		};
@@ -373,6 +381,12 @@ FOLIO_MATCH(_refcount, _refcount);
#ifdef CONFIG_MEMCG
FOLIO_MATCH(memcg_data, memcg_data);
#endif
#if defined(WANT_PAGE_VIRTUAL)
FOLIO_MATCH(virtual, virtual);
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
FOLIO_MATCH(_last_cpupid, _last_cpupid);
#endif
#undef FOLIO_MATCH
#define FOLIO_MATCH(pg, fl)						\
	static_assert(offsetof(struct folio, fl) ==			\
+3 −3
Original line number Diff line number Diff line
@@ -20,7 +20,7 @@ extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p, bool final);
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio,
				int src_nid, int dst_cpu);
#else
static inline void task_numa_fault(int last_node, int node, int pages,
@@ -38,7 +38,7 @@ static inline void task_numa_free(struct task_struct *p, bool final)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
				struct page *page, int src_nid, int dst_cpu)
				struct folio *folio, int src_nid, int dst_cpu)
{
	return true;
}
Loading