Commit ebe75e47 authored by Huang Ying, committed by Andrew Morton

migrate_pages: share more code between _unmap and _move

This is a code cleanup patch to reduce the duplicated code between the
_unmap and _move stages of migrate_pages().  No functionality change is
expected.
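
To make the shape of the cleanup easier to see, here is a distilled,
standalone sketch of the pattern (illustrative only: trylock(), undo()
and stage() are hypothetical names, not the kernel API). Instead of a
ladder of unwind labels (out_unlock_both/out_unlock) in one stage and
open-coded unlock/put calls in the other, each stage records how far
its setup got in booleans and funnels every failure through a single
out: label that calls a shared undo helper:

	#include <stdbool.h>
	#include <stdio.h>

	struct folio { bool locked; };

	/* Shared undo helper in the spirit of migrate_folio_undo_src()/_dst():
	 * the "locked" flag tells it whether there is anything to unlock. */
	static void undo(struct folio *f, bool locked)
	{
		if (locked)
			f->locked = false;
	}

	static bool trylock(struct folio *f)
	{
		if (f->locked)
			return false;
		f->locked = true;
		return true;
	}

	/* One exit label instead of out_unlock_both/out_unlock/out: the
	 * booleans record progress and the undo helper unwinds exactly that. */
	static int stage(struct folio *src, struct folio *dst)
	{
		bool locked = false, dst_locked = false;
		int rc = -1;

		if (!trylock(src))
			goto out;
		locked = true;

		if (!trylock(dst))
			goto out;
		dst_locked = true;

		rc = 0;		/* on success, locks stay held for the next stage */
	out:
		if (rc) {
			undo(src, locked);
			undo(dst, dst_locked);
		}
		return rc;
	}

	int main(void)
	{
		struct folio src = { false }, dst = { true };

		printf("rc=%d\n", stage(&src, &dst));	/* dst busy: rc=-1, src unwound */
		dst.locked = false;
		printf("rc=%d\n", stage(&src, &dst));	/* rc=0, both locks held */
		return 0;
	}

In the patch itself, migrate_folio_undo_src() and migrate_folio_undo_dst()
gain a bool locked argument for exactly this purpose, and the merged
migrate_folio_unmap() and migrate_folio_move() both funnel their failure
paths into that shared pair.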

Link: https://lkml.kernel.org/r/20230213123444.155149-8-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Bharata B Rao <bharata@amd.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Xin Hao <xhao@linux.alibaba.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 80562ba0
mm/migrate.c: +85 −122
@@ -1055,6 +1055,7 @@ static void __migrate_folio_extract(struct folio *dst,
 static void migrate_folio_undo_src(struct folio *src,
 				   int page_was_mapped,
 				   struct anon_vma *anon_vma,
+				   bool locked,
 				   struct list_head *ret)
 {
 	if (page_was_mapped)
@@ -1062,15 +1063,19 @@ static void migrate_folio_undo_src(struct folio *src,
 	/* Drop an anon_vma reference if we took one */
 	if (anon_vma)
 		put_anon_vma(anon_vma);
-	folio_unlock(src);
-	list_move_tail(&src->lru, ret);
+	if (locked)
+		folio_unlock(src);
+	if (ret)
+		list_move_tail(&src->lru, ret);
 }
 
 /* Restore the destination folio to the original state upon failure */
 static void migrate_folio_undo_dst(struct folio *dst,
+				   bool locked,
 				   free_page_t put_new_page,
 				   unsigned long private)
 {
-	folio_unlock(dst);
+	if (locked)
+		folio_unlock(dst);
 	if (put_new_page)
 		put_new_page(&dst->page, private);
@@ -1096,13 +1101,42 @@ static void migrate_folio_done(struct folio *src,
 		folio_put(src);
 }
 
-static int __migrate_folio_unmap(struct folio *src, struct folio *dst, int force,
-				 bool avoid_force_lock, enum migrate_mode mode)
+/* Obtain the lock on page, remove all ptes. */
+static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page,
+			       unsigned long private, struct folio *src,
+			       struct folio **dstp, int force, bool avoid_force_lock,
+			       enum migrate_mode mode, enum migrate_reason reason,
+			       struct list_head *ret)
 {
+	struct folio *dst;
 	int rc = -EAGAIN;
+	struct page *newpage = NULL;
 	int page_was_mapped = 0;
 	struct anon_vma *anon_vma = NULL;
 	bool is_lru = !__PageMovable(&src->page);
+	bool locked = false;
+	bool dst_locked = false;
+
+	if (!thp_migration_supported() && folio_test_transhuge(src))
+		return -ENOSYS;
+
+	if (folio_ref_count(src) == 1) {
+		/* Folio was freed from under us. So we are done. */
+		folio_clear_active(src);
+		folio_clear_unevictable(src);
+		/* free_pages_prepare() will clear PG_isolated. */
+		list_del(&src->lru);
+		migrate_folio_done(src, reason);
+		return MIGRATEPAGE_SUCCESS;
+	}
+
+	newpage = get_new_page(&src->page, private);
+	if (!newpage)
+		return -ENOMEM;
+	dst = page_folio(newpage);
+	*dstp = dst;
+
+	dst->private = NULL;
 
 	if (!folio_trylock(src)) {
 		if (!force || mode == MIGRATE_ASYNC)
@@ -1137,6 +1171,7 @@ static int __migrate_folio_unmap(struct folio *src, struct folio *dst, int force
 
 		folio_lock(src);
 	}
+	locked = true;
 
 	if (folio_test_writeback(src)) {
 		/*
@@ -1151,10 +1186,10 @@ static int __migrate_folio_unmap(struct folio *src, struct folio *dst, int force
 			break;
 		default:
 			rc = -EBUSY;
-			goto out_unlock;
+			goto out;
 		}
 		if (!force)
-			goto out_unlock;
+			goto out;
 		folio_wait_writeback(src);
 	}
 
@@ -1184,7 +1219,8 @@ static int __migrate_folio_unmap(struct folio *src, struct folio *dst, int force
 	 * This is much like races on refcount of oldpage: just don't BUG().
 	 */
 	if (unlikely(!folio_trylock(dst)))
-		goto out_unlock;
+		goto out;
+	dst_locked = true;
 
 	if (unlikely(!is_lru)) {
 		__migrate_folio_record(dst, page_was_mapped, anon_vma);
@@ -1206,7 +1242,7 @@ static int __migrate_folio_unmap(struct folio *src, struct folio *dst, int force
 	if (!src->mapping) {
 		if (folio_test_private(src)) {
 			try_to_free_buffers(src);
-			goto out_unlock_both;
+			goto out;
 		}
 	} else if (folio_mapped(src)) {
 		/* Establish migration ptes */
@@ -1221,73 +1257,25 @@ static int __migrate_folio_unmap(struct folio *src, struct folio *dst, int force
 		return MIGRATEPAGE_UNMAP;
 	}
 
 	if (page_was_mapped)
 		remove_migration_ptes(src, src, false);
 
-out_unlock_both:
-	folio_unlock(dst);
-out_unlock:
-	/* Drop an anon_vma reference if we took one */
-	if (anon_vma)
-		put_anon_vma(anon_vma);
-	folio_unlock(src);
 out:
-
-	return rc;
-}
-
-/* Obtain the lock on page, remove all ptes. */
-static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page,
-			       unsigned long private, struct folio *src,
-			       struct folio **dstp, int force, bool avoid_force_lock,
-			       enum migrate_mode mode, enum migrate_reason reason,
-			       struct list_head *ret)
-{
-	struct folio *dst;
-	int rc = MIGRATEPAGE_UNMAP;
-	struct page *newpage = NULL;
-
-	if (!thp_migration_supported() && folio_test_transhuge(src))
-		return -ENOSYS;
-
-	if (folio_ref_count(src) == 1) {
-		/* Folio was freed from under us. So we are done. */
-		folio_clear_active(src);
-		folio_clear_unevictable(src);
-		/* free_pages_prepare() will clear PG_isolated. */
-		list_del(&src->lru);
-		migrate_folio_done(src, reason);
-		return MIGRATEPAGE_SUCCESS;
-	}
-
-	newpage = get_new_page(&src->page, private);
-	if (!newpage)
-		return -ENOMEM;
-	dst = page_folio(newpage);
-	*dstp = dst;
-
-	dst->private = NULL;
-	rc = __migrate_folio_unmap(src, dst, force, avoid_force_lock, mode);
-	if (rc == MIGRATEPAGE_UNMAP)
-		return rc;
-
 	/*
 	 * A folio that has not been unmapped will be restored to
 	 * right list unless we want to retry.
 	 */
-	if (rc != -EAGAIN && rc != -EDEADLOCK)
-		list_move_tail(&src->lru, ret);
+	if (rc == -EAGAIN || rc == -EDEADLOCK)
+		ret = NULL;
 
-	if (put_new_page)
-		put_new_page(&dst->page, private);
-	else
-		folio_put(dst);
+	migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
+	migrate_folio_undo_dst(dst, dst_locked, put_new_page, private);
 
 	return rc;
 }
 
-static int __migrate_folio_move(struct folio *src, struct folio *dst,
-				enum migrate_mode mode)
+/* Migrate the folio to the newly allocated folio in dst. */
+static int migrate_folio_move(free_page_t put_new_page, unsigned long private,
+			      struct folio *src, struct folio *dst,
+			      enum migrate_mode mode, enum migrate_reason reason,
+			      struct list_head *ret)
 {
 	int rc;
 	int page_was_mapped = 0;
@@ -1300,12 +1288,8 @@ static int __migrate_folio_move(struct folio *src, struct folio *dst,
 	list_del(&dst->lru);
 
 	rc = move_to_new_folio(dst, src, mode);
-
-	if (rc == -EAGAIN) {
-		list_add(&dst->lru, prev);
-		__migrate_folio_record(dst, page_was_mapped, anon_vma);
-		return rc;
-	}
+	if (rc)
+		goto out;
 
 	if (unlikely(!is_lru))
 		goto out_unlock_both;
@@ -1319,70 +1303,49 @@ static int __migrate_folio_move(struct folio *src, struct folio *dst,
 	 * unsuccessful, and other cases when a page has been temporarily
 	 * isolated from the unevictable LRU: but this case is the easiest.
 	 */
-	if (rc == MIGRATEPAGE_SUCCESS) {
-		folio_add_lru(dst);
-		if (page_was_mapped)
-			lru_add_drain();
-	}
+	folio_add_lru(dst);
+	if (page_was_mapped)
+		lru_add_drain();
 
 	if (page_was_mapped)
-		remove_migration_ptes(src,
-			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
+		remove_migration_ptes(src, dst, false);
 
 out_unlock_both:
 	folio_unlock(dst);
-	/* Drop an anon_vma reference if we took one */
-	if (anon_vma)
-		put_anon_vma(anon_vma);
-	folio_unlock(src);
+	set_page_owner_migrate_reason(&dst->page, reason);
 	/*
 	 * If migration is successful, decrease refcount of dst,
 	 * which will not free the page because new page owner increased
 	 * refcounter.
 	 */
-	if (rc == MIGRATEPAGE_SUCCESS)
-		folio_put(dst);
+	folio_put(dst);
 
-	return rc;
-}
-
-/* Migrate the folio to the newly allocated folio in dst. */
-static int migrate_folio_move(free_page_t put_new_page, unsigned long private,
-			      struct folio *src, struct folio *dst,
-			      enum migrate_mode mode, enum migrate_reason reason,
-			      struct list_head *ret)
-{
-	int rc;
-
-	rc = __migrate_folio_move(src, dst, mode);
-	if (rc == MIGRATEPAGE_SUCCESS)
-		set_page_owner_migrate_reason(&dst->page, reason);
-
-	if (rc != -EAGAIN) {
 	/*
-	 * A folio that has been migrated has all references
-	 * removed and will be freed. A folio that has not been
-	 * migrated will have kept its references and be restored.
+	 * A folio that has been migrated has all references removed
+	 * and will be freed.
 	 */
 	list_del(&src->lru);
-	}
+	/* Drop an anon_vma reference if we took one */
+	if (anon_vma)
+		put_anon_vma(anon_vma);
+	folio_unlock(src);
+	migrate_folio_done(src, reason);
 
+	return rc;
+out:
 	/*
-	 * If migration is successful, releases reference grabbed during
-	 * isolation. Otherwise, restore the folio to right list unless
-	 * we want to retry.
+	 * A folio that has not been migrated will be restored to
+	 * right list unless we want to retry.
 	 */
-	if (rc == MIGRATEPAGE_SUCCESS) {
-		migrate_folio_done(src, reason);
-	} else if (rc != -EAGAIN) {
-		list_add_tail(&src->lru, ret);
-
-		if (put_new_page)
-			put_new_page(&dst->page, private);
-		else
-			folio_put(dst);
+	if (rc == -EAGAIN) {
+		list_add(&dst->lru, prev);
+		__migrate_folio_record(dst, page_was_mapped, anon_vma);
+		return rc;
 	}
 
+	migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
+	migrate_folio_undo_dst(dst, true, put_new_page, private);
+
 	return rc;
 }

@@ -1918,9 +1881,9 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 
 		__migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
 		migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
-				       ret_folios);
+				       true, ret_folios);
 		list_del(&dst->lru);
-		migrate_folio_undo_dst(dst, put_new_page, private);
+		migrate_folio_undo_dst(dst, true, put_new_page, private);
 		dst = dst2;
 		dst2 = list_next_entry(dst, lru);
 	}