Commit 869f7ee6 authored by Matthew Wilcox (Oracle)

mm/rmap: Convert try_to_unmap() to take a folio



Change all three callers and the worker function try_to_unmap_one().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
parent af28a988
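
For orientation, here is the conversion pattern this commit applies at each call site, as a minimal userspace sketch: callers that still hold a struct page convert at the boundary with page_folio(). All types, values, and helpers below are simplified stand-ins for illustration, not the kernel's definitions.

#include <stdio.h>

/* Simplified stand-ins for the kernel types (illustrative only). */
struct page { unsigned long flags; };
struct folio { struct page page; };

enum ttu_flags { TTU_IGNORE_MLOCK = 1 << 0 };	/* value illustrative */

/* Mock of page_folio(): the real helper resolves a (possibly tail)
 * page to its enclosing folio; here every page is a one-page folio. */
static struct folio *page_folio(struct page *page)
{
	return (struct folio *)page;
}

/* New convention: the unmap worker takes a folio, not a page. */
static void try_to_unmap(struct folio *folio, enum ttu_flags flags)
{
	printf("unmap folio %p, flags %#x\n", (void *)folio, (unsigned)flags);
}

int main(void)
{
	struct page page = { 0 };

	/* Callers still holding a struct page convert at the call site. */
	try_to_unmap(page_folio(&page), TTU_IGNORE_MLOCK);
	return 0;
}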
include/linux/rmap.h +2 −2
@@ -194,7 +194,7 @@ int folio_referenced(struct folio *, int is_locked,
 			struct mem_cgroup *memcg, unsigned long *vm_flags);
 
 void try_to_migrate(struct page *page, enum ttu_flags flags);
-void try_to_unmap(struct page *, enum ttu_flags flags);
+void try_to_unmap(struct folio *, enum ttu_flags flags);
 
 int make_device_exclusive_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, struct page **pages,
@@ -309,7 +309,7 @@ static inline int folio_referenced(struct folio *folio, int is_locked,
 	return 0;
 }
 
-static inline void try_to_unmap(struct page *page, enum ttu_flags flags)
+static inline void try_to_unmap(struct folio *folio, enum ttu_flags flags)
 {
 }
 
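The second rmap.h hunk keeps the inline no-op stub in step with the real declaration, so both build configurations see the same signature. A sketch of the pattern, assuming the CONFIG_MMU guard that surrounds these declarations in rmap.h (stand-in types again, not the kernel's):

struct folio { int unused; };			/* stand-in type */
enum ttu_flags { TTU_IGNORE_MLOCK = 1 << 0 };	/* value illustrative */

#ifdef CONFIG_MMU
void try_to_unmap(struct folio *, enum ttu_flags flags);
#else
/* Without an MMU there are no user page tables to walk. */
static inline void try_to_unmap(struct folio *folio, enum ttu_flags flags)
{
}
#endif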
mm/huge_memory.c +2 −1
@@ -2251,6 +2251,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 
 static void unmap_page(struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
 		TTU_SYNC;
 
@@ -2264,7 +2265,7 @@ static void unmap_page(struct page *page)
 	if (PageAnon(page))
 		try_to_migrate(page, ttu_flags);
 	else
-		try_to_unmap(page, ttu_flags | TTU_IGNORE_MLOCK);
+		try_to_unmap(folio, ttu_flags | TTU_IGNORE_MLOCK);
 
 	VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
 }
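unmap_page() shows the other half of the pattern: where a function needs the folio beyond a single call, the conversion is hoisted into a local variable rather than repeated at each call site. A before/after sketch reusing the stand-in types from the first example (these function names are hypothetical):

/* Before: convert inline at the call site. */
static void unmap_page_old_shape(struct page *page)
{
	try_to_unmap(page_folio(page), TTU_IGNORE_MLOCK);
}

/* After: resolve the folio once so later uses can share it. */
static void unmap_page_new_shape(struct page *page)
{
	struct folio *folio = page_folio(page);

	try_to_unmap(folio, TTU_IGNORE_MLOCK);
}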
mm/khugepaged.c +2 −1
@@ -1834,7 +1834,8 @@ static void collapse_file(struct mm_struct *mm,
 		}
 
 		if (page_mapped(page))
-			try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
+			try_to_unmap(page_folio(page),
+					TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);
 
 		xas_lock_irq(&xas);
 		xas_set(&xas, index);
mm/memory-failure.c +4 −3
@@ -1347,6 +1347,7 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
 static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 				  int flags, struct page *hpage)
 {
+	struct folio *folio = page_folio(hpage);
 	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
 	struct address_space *mapping;
 	LIST_HEAD(tokill);
@@ -1412,7 +1413,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
 
 	if (!PageHuge(hpage)) {
-		try_to_unmap(hpage, ttu);
+		try_to_unmap(folio, ttu);
 	} else {
 		if (!PageAnon(hpage)) {
 			/*
@@ -1424,12 +1425,12 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 			 */
 			mapping = hugetlb_page_mapping_lock_write(hpage);
 			if (mapping) {
-				try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
+				try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
 				i_mmap_unlock_write(mapping);
 			} else
 				pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
 		} else {
-			try_to_unmap(hpage, ttu);
+			try_to_unmap(folio, ttu);
 		}
 	}
 
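In the memory-failure paths above, the same folio is unmapped with different flag combinations; TTU_RMAP_LOCKED tells the worker that the caller already holds the rmap lock (taken here via hugetlb_page_mapping_lock_write()). A sketch of how these bit-flags compose, with illustrative values rather than the kernel's actual ones:

/* Illustrative values; the kernel defines the real ones in rmap.h. */
enum ttu_flags {
	TTU_IGNORE_MLOCK = 1 << 0,	/* don't honor mlock'ed VMAs */
	TTU_SYNC	 = 1 << 1,	/* stricter, synchronous checks */
	TTU_RMAP_LOCKED	 = 1 << 2,	/* caller already holds the rmap lock */
};

/* Flags OR together, so a caller that already holds the lock passes: */
/*	try_to_unmap(folio, ttu | TTU_RMAP_LOCKED);			*/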
mm/memory_hotplug.c +8 −5
@@ -1690,10 +1690,13 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 				      DEFAULT_RATELIMIT_BURST);
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+		struct folio *folio;
+
 		if (!pfn_valid(pfn))
 			continue;
 		page = pfn_to_page(pfn);
-		head = compound_head(page);
+		folio = page_folio(page);
+		head = &folio->page;
 
 		if (PageHuge(page)) {
 			pfn = page_to_pfn(head) + compound_nr(head) - 1;
@@ -1710,10 +1713,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		 * the unmap as the catch all safety net).
 		 */
 		if (PageHWPoison(page)) {
-			if (WARN_ON(PageLRU(page)))
-				isolate_lru_page(page);
-			if (page_mapped(page))
-				try_to_unmap(page, TTU_IGNORE_MLOCK);
+			if (WARN_ON(folio_test_lru(folio)))
+				folio_isolate_lru(folio);
+			if (folio_mapped(folio))
+				try_to_unmap(folio, TTU_IGNORE_MLOCK);
 			continue;
 		}
 
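Beyond rerouting try_to_unmap(), the hotplug hunk switches the neighbouring page predicates to their folio counterparts once the folio is in hand. The correspondence used above, noted as a C comment table (signatures elided):

/*
 * page API                ->  folio API used in the hunk
 * PageLRU(page)               folio_test_lru(folio)
 * isolate_lru_page(page)      folio_isolate_lru(folio)
 * page_mapped(page)           folio_mapped(folio)
 * compound_head(page)         &folio->page  (the folio embeds its head page)
 */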