Commit 28965f0f authored by Vishal Moola (Oracle), committed by Andrew Morton
Browse files

userfaultfd: replace lru_cache functions with folio_add functions

Replaces lru_cache_add() and lru_cache_add_inactive_or_unevictable() with
folio_add_lru() and folio_add_lru_vma().  This is in preparation for the
removal of lru_cache_add().

Link: https://lkml.kernel.org/r/20221101175326.13265-4-vishal.moola@gmail.com


Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Miklos Szeredi <mszeredi@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 063aaad7
Loading
Loading
Loading
Loading
+4 −2
Original line number Diff line number Diff line
@@ -66,6 +66,7 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
	bool page_in_cache = page_mapping(page);
	spinlock_t *ptl;
	struct folio *folio;
	struct inode *inode;
	pgoff_t offset, max_off;

@@ -113,14 +114,15 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
	if (!pte_none_mostly(*dst_pte))
		goto out_unlock;

	folio = page_folio(page);
	if (page_in_cache) {
		/* Usually, cache pages are already added to LRU */
		if (newly_allocated)
			lru_cache_add(page);
			folio_add_lru(folio);
		page_add_file_rmap(page, dst_vma, false);
	} else {
		page_add_new_anon_rmap(page, dst_vma, dst_addr);
		lru_cache_add_inactive_or_unevictable(page, dst_vma);
		folio_add_lru_vma(folio, dst_vma);
	}

	/*