Commit 0a2c1e81 authored by Vishal Moola (Oracle), committed by Andrew Morton

mm/mempolicy: convert queue_pages_hugetlb() to queue_folios_hugetlb()

This change is in preparation for the conversion of queue_pages_required()
to queue_folio_required() and migrate_page_add() to migrate_folio_add().
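
For reference, the folio-based check could look roughly like the sketch below. This is a hypothetical illustration of the planned queue_folio_required() conversion, not the actual follow-up patch; it simply restates the node-mask test queue_pages_required() performs today in folio terms:

	/*
	 * Hypothetical sketch only: a folio-based replacement for
	 * queue_pages_required(). Mirrors the existing check, which
	 * tests whether the backing node is in qp->nmask, subject to
	 * MPOL_MF_INVERT.
	 */
	static inline bool queue_folio_required(struct folio *folio,
						struct queue_pages *qp)
	{
		int nid = folio_nid(folio);
		unsigned long flags = qp->flags;

		return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
	}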

Link: https://lkml.kernel.org/r/20230130201833.27042-5-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: "Yin, Fengwei" <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 3dae02bb
mm/mempolicy.c (+18 −11)
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -558,7 +558,7 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
 	return addr != end ? -EIO : 0;
 }
 
-static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
+static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
 			       unsigned long addr, unsigned long end,
 			       struct mm_walk *walk)
 {
@@ -566,7 +566,7 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
 #ifdef CONFIG_HUGETLB_PAGE
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = (qp->flags & MPOL_MF_VALID);
-	struct page *page;
+	struct folio *folio;
 	spinlock_t *ptl;
 	pte_t entry;
 
@@ -574,13 +574,13 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
 	entry = huge_ptep_get(pte);
 	if (!pte_present(entry))
 		goto unlock;
-	page = pte_page(entry);
-	if (!queue_pages_required(page, qp))
+	folio = pfn_folio(pte_pfn(entry));
+	if (!queue_pages_required(&folio->page, qp))
 		goto unlock;
 
 	if (flags == MPOL_MF_STRICT) {
 		/*
-		 * STRICT alone means only detecting misplaced page and no
+		 * STRICT alone means only detecting misplaced folio and no
 		 * need to further check other vma.
 		 */
 		ret = -EIO;
@@ -591,21 +591,28 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
 		/*
 		 * Must be STRICT with MOVE*, otherwise .test_walk() have
 		 * stopped walking current vma.
-		 * Detecting misplaced page but allow migrating pages which
+		 * Detecting misplaced folio but allow migrating folios which
 		 * have been queued.
 		 */
 		ret = 1;
 		goto unlock;
 	}
 
-	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
+	/*
+	 * With MPOL_MF_MOVE, we try to migrate only unshared folios. If it
+	 * is shared it is likely not worth migrating.
+	 *
+	 * To check if the folio is shared, ideally we want to make sure
+	 * every page is mapped to the same process. Doing that is very
+	 * expensive, so check the estimated mapcount of the folio instead.
+	 */
 	if (flags & (MPOL_MF_MOVE_ALL) ||
-	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
+	    (flags & MPOL_MF_MOVE && folio_estimated_sharers(folio) == 1 &&
 	     !hugetlb_pmd_shared(pte))) {
-		if (isolate_hugetlb(page_folio(page), qp->pagelist) &&
+		if (isolate_hugetlb(folio, qp->pagelist) &&
 			(flags & MPOL_MF_STRICT))
 			/*
-			 * Failed to isolate page but allow migrating pages
+			 * Failed to isolate folio but allow migrating pages
 			 * which have been queued.
 			 */
 			ret = 1;
@@ -703,7 +710,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
 }
 
 static const struct mm_walk_ops queue_pages_walk_ops = {
-	.hugetlb_entry		= queue_pages_hugetlb,
+	.hugetlb_entry		= queue_folios_hugetlb,
 	.pmd_entry		= queue_folios_pte_range,
 	.test_walk		= queue_pages_test_walk,
 };