Commit 09c02e56 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

swap: convert add_to_swap() to take a folio

The only caller already has a folio available, so this saves a conversion.
Also convert the return type to boolean.

Link: https://lkml.kernel.org/r/20220504182857.4013401-9-willy@infradead.org


Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent e2e3fdc7
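In short, the calling convention changes as sketched below. This is an illustrative fragment, not taken from the commit; the error label is hypothetical:

	/* Before: add_to_swap() took a page, rederived the folio internally
	 * via page_folio() (a compound_head() lookup), and returned int
	 * (1 on success, 0 on failure). */
	if (!add_to_swap(page))
		goto fail_path;

	/* After: the caller passes the folio it already holds (locked and
	 * uptodate) and tests a bool instead; no conversion is needed. */
	if (!add_to_swap(folio))
		goto fail_path;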
mm/swap.h +3 −3
@@ -32,7 +32,7 @@ extern struct address_space *swapper_spaces[];
 		>> SWAP_ADDRESS_SPACE_SHIFT])
 
 void show_swap_cache_info(void);
-int add_to_swap(struct page *page);
+bool add_to_swap(struct folio *folio);
 void *get_shadow_from_swap_cache(swp_entry_t entry);
 int add_to_swap_cache(struct page *page, swp_entry_t entry,
 		      gfp_t gfp, void **shadowp);
@@ -119,9 +119,9 @@ struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
 	return find_get_page(mapping, index);
 }
 
-static inline int add_to_swap(struct page *page)
+static inline bool add_to_swap(struct folio *folio)
 {
-	return 0;
+	return false;
 }
 
 static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
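Note that the !CONFIG_SWAP stub now returns false where it previously returned 0, so the idiomatic truth-test in callers behaves identically either way. A hypothetical caller, for illustration:

	/* With swap compiled out, the inline stub above always yields
	 * false, so the failure path is taken unconditionally. */
	if (!add_to_swap(folio))
		goto activate_locked_split;	/* label borrowed from shrink_page_list() */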
mm/swap_state.c +25 −22
@@ -176,24 +176,26 @@ void __delete_from_swap_cache(struct page *page,
 }
 
 /**
- * add_to_swap - allocate swap space for a page
- * @page: page we want to move to swap
+ * add_to_swap - allocate swap space for a folio
+ * @folio: folio we want to move to swap
  *
- * Allocate swap space for the page and add the page to the
- * swap cache.  Caller needs to hold the page lock.
+ * Allocate swap space for the folio and add the folio to the
+ * swap cache.
+ *
+ * Context: Caller needs to hold the folio lock.
+ * Return: Whether the folio was added to the swap cache.
  */
-int add_to_swap(struct page *page)
+bool add_to_swap(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
 	swp_entry_t entry;
 	int err;
 
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE(!PageUptodate(page), page);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
 
 	entry = folio_alloc_swap(folio);
 	if (!entry.val)
-		return 0;
+		return false;
 
 	/*
 	 * XArray node allocations from PF_MEMALLOC contexts could
@@ -206,7 +208,7 @@ int add_to_swap(struct page *page)
 	/*
 	 * Add it to the swap cache.
 	 */
-	err = add_to_swap_cache(page, entry,
+	err = add_to_swap_cache(&folio->page, entry,
 			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
 	if (err)
 		/*
@@ -215,22 +217,23 @@ int add_to_swap(struct page *page)
 		 */
 		goto fail;
 	/*
-	 * Normally the page will be dirtied in unmap because its pte should be
-	 * dirty. A special case is MADV_FREE page. The page's pte could have
-	 * dirty bit cleared but the page's SwapBacked bit is still set because
-	 * clearing the dirty bit and SwapBacked bit has no lock protected. For
-	 * such page, unmap will not set dirty bit for it, so page reclaim will
-	 * not write the page out. This can cause data corruption when the page
-	 * is swap in later. Always setting the dirty bit for the page solves
-	 * the problem.
+	 * Normally the folio will be dirtied in unmap because its
+	 * pte should be dirty. A special case is MADV_FREE page. The
+	 * page's pte could have dirty bit cleared but the folio's
+	 * SwapBacked flag is still set because clearing the dirty bit
+	 * and SwapBacked flag has no lock protected. For such folio,
+	 * unmap will not set dirty bit for it, so folio reclaim will
+	 * not write the folio out. This can cause data corruption when
+	 * the folio is swapped in later. Always setting the dirty flag
+	 * for the folio solves the problem.
 	 */
-	set_page_dirty(page);
+	folio_mark_dirty(folio);
 
-	return 1;
+	return true;
 
 fail:
-	put_swap_page(page, entry);
-	return 0;
+	put_swap_page(&folio->page, entry);
+	return false;
 }
 
 /*
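One transitional detail: add_to_swap_cache() and put_swap_page() had not yet been converted to folios at this point in the series, so the commit bridges to them with &folio->page rather than a page_folio()/folio_page() round trip. A minimal sketch of the idiom:

	/* &folio->page names the folio's head page directly, letting a
	 * folio-based function call a still page-based API with no
	 * compound_head() work. */
	err = add_to_swap_cache(&folio->page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);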
mm/vmscan.c +3 −3
@@ -1731,8 +1731,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 								page_list))
 						goto activate_locked;
 				}
-				if (!add_to_swap(page)) {
-					if (!PageTransHuge(page))
+				if (!add_to_swap(folio)) {
+					if (!folio_test_large(folio))
 						goto activate_locked_split;
 					/* Fallback to swap normal pages */
 					if (split_folio_to_list(folio,
@@ -1741,7 +1741,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 					count_vm_event(THP_SWPOUT_FALLBACK);
 #endif
-					if (!add_to_swap(page))
+					if (!add_to_swap(folio))
 						goto activate_locked_split;
 				}
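For reference, the page-to-folio API replacements visible in the hunks above, collected in one place (note that folio_test_large() is true for any multi-page folio, a slightly broader test than PageTransHuge()):

	PageLocked(page)            ->  folio_test_locked(folio)
	PageUptodate(page)          ->  folio_test_uptodate(folio)
	VM_BUG_ON_PAGE(cond, page)  ->  VM_BUG_ON_FOLIO(cond, folio)
	set_page_dirty(page)        ->  folio_mark_dirty(folio)
	PageTransHuge(page)         ->  folio_test_large(folio)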