Commit cae106dd authored by David Stevens, committed by Andrew Morton
Browse files

mm/khugepaged: refactor collapse_file control flow

Add a rollback label to deal with failure, instead of continuously
checking for RESULT_SUCCESS, to make it easier to add more failure cases.
The refactoring also allows the collapse_file tracepoint to include hpage
on success (instead of NULL).

Link: https://lkml.kernel.org/r/20230404120117.2562166-3-stevensd@google.com


Signed-off-by: David Stevens <stevensd@chromium.org>
Acked-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jiaqi Yan <jiaqiyan@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent efa3d814
Loading
Loading
Loading
Loading
+113 −117
Original line number Diff line number Diff line
@@ -1894,6 +1894,12 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
	if (result != SCAN_SUCCEED)
		goto out;

	__SetPageLocked(hpage);
	if (is_shmem)
		__SetPageSwapBacked(hpage);
	hpage->index = start;
	hpage->mapping = mapping;

	/*
	 * Ensure we have slots for all the pages in the range.  This is
	 * almost certainly a no-op because most of the pages must be present
@@ -1906,16 +1912,10 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {
			result = SCAN_FAIL;
			goto out;
			goto rollback;
		}
	} while (1);

	__SetPageLocked(hpage);
	if (is_shmem)
		__SetPageSwapBacked(hpage);
	hpage->index = start;
	hpage->mapping = mapping;

	/*
	 * At this point the hpage is locked and not up-to-date.
	 * It's safe to insert it into the page cache, because nobody would
@@ -2152,7 +2152,9 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
	 */
	try_to_unmap_flush();

	if (result == SCAN_SUCCEED) {
	if (result != SCAN_SUCCEED)
		goto rollback;

	/*
	 * Replacing old pages with new one has succeeded, now we
	 * attempt to copy the contents.
@@ -2163,21 +2165,17 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
			clear_highpage(hpage + (index % HPAGE_PMD_NR));
			index++;
		}
			if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR),
					     page) > 0) {
		if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) {
			result = SCAN_COPY_MC;
				break;
			goto rollback;
		}
		index++;
	}
		while (result == SCAN_SUCCEED && index < end) {
	while (index < end) {
		clear_highpage(hpage + (index % HPAGE_PMD_NR));
		index++;
	}
	}

	nr = thp_nr_pages(hpage);
	if (result == SCAN_SUCCEED) {
	/*
	 * Copying old pages to huge one has succeeded, now we
	 * need to free the old pages.
@@ -2192,6 +2190,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
		put_page(page);
	}

	nr = thp_nr_pages(hpage);
	xas_lock_irq(&xas);
	if (is_shmem)
		__mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
@@ -2222,8 +2221,9 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
	result = retract_page_tables(mapping, start, mm, addr, hpage,
				     cc);
	unlock_page(hpage);
		hpage = NULL;
	} else {
	goto out;

rollback:
	/* Something went wrong: roll back page cache changes */
	xas_lock_irq(&xas);
	if (nr_none) {
@@ -2274,15 +2274,11 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
	xas_unlock_irq(&xas);

	hpage->mapping = NULL;
	}

	if (hpage)
	unlock_page(hpage);
	put_page(hpage);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	if (hpage)
		put_page(hpage);

	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
	return result;
}