Commit 3adfe5a6 authored by Kefeng Wang, committed by Tong Tiangen
Browse files

mm: migrate: split folio_migrate_mapping()

mainline inclusion
from mainline-v6.11-rc1
commit 528815392f873f0af8c6cdc279c89bd0154cbf6a
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IAROKE
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=528815392f873f0af8c6cdc279c89bd0154cbf6a

--------------------------------

The folio refcount check is moved out for both !mapping and mapping folio,
also update comment from page to folio for folio_migrate_mapping().

No functional change intended.

Link: https://lkml.kernel.org/r/20240626085328.608006-4-wangkefeng.wang@huawei.com


Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Jiaqi Yan <jiaqiyan@google.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
parent c9cc4059
Loading
Loading
Loading
Loading
+22 −16
Original line number Diff line number Diff line
@@ -391,28 +391,23 @@ static int folio_expected_refs(struct address_space *mapping,
}

/*
 * Replace the page in the mapping.
 * Replace the folio in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 * 1 for anonymous folios without a mapping
 * 2 for folios with a mapping
 * 3 for folios with a mapping and PagePrivate/PagePrivate2 set.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
static int __folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int expected_count)
{
	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;
	long nr = folio_nr_pages(folio);
	long entries, i;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (folio_ref_count(folio) != expected_count)
			return -EAGAIN;

		/* Take off deferred split queue while frozen and memcg set */
		if (folio_test_large(folio) &&
		    folio_test_large_rmappable(folio)) {
@@ -462,7 +457,7 @@ int folio_migrate_mapping(struct address_space *mapping,
		entries = 1;
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	/* Move dirty while folio refs frozen and newfolio not yet exposed */
	dirty = folio_test_dirty(folio);
	if (dirty) {
		folio_clear_dirty(folio);
@@ -476,7 +471,7 @@ int folio_migrate_mapping(struct address_space *mapping,
	}

	/*
	 * Drop cache reference from old page by unfreezing
	 * Drop cache reference from old folio by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
@@ -492,11 +487,11 @@ int folio_migrate_mapping(struct address_space *mapping,

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * the folio for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 * new folio and drop references to the old folio.
	 *
	 * Note that anonymous pages are accounted for
	 * Note that anonymous folios are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
@@ -536,6 +531,17 @@ int folio_migrate_mapping(struct address_space *mapping,

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Public entry point for replacing @folio with @newfolio in @mapping.
 *
 * Performs the unlocked refcount precheck (expected refs from the mapping
 * plus @extra_count from the caller) before handing off to
 * __folio_migrate_mapping(), which does the work under the xarray lock
 * with the refcount frozen.
 *
 * Returns MIGRATEPAGE_SUCCESS on success, or -EAGAIN if the folio has
 * unexpected extra references (it is pinned elsewhere), in which case the
 * caller should retry or give up.
 *
 * NOTE(review): this check is racy by design — __folio_migrate_mapping()
 * re-validates the count when freezing; presumably this early bail-out just
 * avoids taking the xarray lock for folios that obviously cannot migrate.
 */
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count)
{
	/* Baseline refs the mapping holds, plus caller-supplied extras. */
	int expected_count = folio_expected_refs(mapping, folio) + extra_count;

	if (folio_ref_count(folio) != expected_count)
		return -EAGAIN;

	return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
}
EXPORT_SYMBOL(folio_migrate_mapping);

/*