Commit dcc5d337 authored by Matthew Wilcox (Oracle)

mm/mlock: Add mlock_vma_folio()

Convert mlock_page() into mlock_folio() and convert the callers.  Keep
mlock_vma_page() as a wrapper.
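
The shape of the change is the standard one for the folio conversion
series: the implementation moves to a function that takes a folio, and
the old page-taking name survives as a trivial wrapper around
page_folio().  As a rough, self-contained illustration of that wrapper
pattern (stub types and a toy page_folio(), not the kernel's
definitions):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's struct folio / struct page. */
struct folio { int nr_pages; bool mlocked; };
struct page { struct folio *head; };

/* Toy page_folio(): resolve a page to the folio containing it. */
static struct folio *page_folio(struct page *page)
{
	return page->head;
}

/* The folio function now carries the implementation... */
static void mlock_folio(struct folio *folio)
{
	if (!folio->mlocked) {
		folio->mlocked = true;
		printf("mlocked %d page(s)\n", folio->nr_pages);
	}
}

/* ...and the legacy page entry point is kept as a one-line wrapper. */
static void mlock_page(struct page *page)
{
	mlock_folio(page_folio(page));
}

int main(void)
{
	struct folio f = { .nr_pages = 4 };
	struct page p = { .head = &f };

	mlock_page(&p);		/* existing callers need no change */
	return 0;
}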

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
parent e83c09a2
mm/internal.h +11 −4
@@ -416,8 +416,8 @@ extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
  * pte mappings of THPs, which cannot be consistently counted: a pte
  * mapping of the THP head cannot be distinguished by the page alone.
  */
-void mlock_page(struct page *page);
-static inline void mlock_vma_page(struct page *page,
+void mlock_folio(struct folio *folio);
+static inline void mlock_vma_folio(struct folio *folio,
 			struct vm_area_struct *vma, bool compound)
 {
 	/*
@@ -429,9 +429,16 @@ static inline void mlock_vma_page(struct page *page,
 	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
 	 */
 	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) &&
-	    (compound || !PageTransCompound(page)))
-		mlock_page(page);
+	    (compound || !folio_test_large(folio)))
+		mlock_folio(folio);
 }
+
+static inline void mlock_vma_page(struct page *page,
+			struct vm_area_struct *vma, bool compound)
+{
+	mlock_vma_folio(page_folio(page), vma, compound);
+}
+
 void munlock_page(struct page *page);
 static inline void munlock_vma_page(struct page *page,
 			struct vm_area_struct *vma, bool compound)
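
A note on the condition in mlock_vma_folio() above:
(vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED is a
mask-and-compare that is true only when VM_LOCKED is set and every
VM_SPECIAL bit is clear, so a VMA that picks up VM_SPECIAL bits while
VM_LOCKED is still set is ignored, as the comment explains.  A minimal
userspace model of the test, with made-up flag values rather than the
kernel's VM_* constants (VM_SPECIAL is really a mask of several bits):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values only, not the kernel's definitions. */
#define VM_LOCKED	0x1UL
#define VM_SPECIAL	0x6UL	/* pretend two "special" bits */

static bool vma_wants_mlock(unsigned long vm_flags)
{
	/* True only if VM_LOCKED is set and no VM_SPECIAL bit is. */
	return (vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
}

int main(void)
{
	printf("%d\n", vma_wants_mlock(VM_LOCKED));		/* 1 */
	printf("%d\n", vma_wants_mlock(VM_LOCKED | 0x2UL));	/* 0: special */
	printf("%d\n", vma_wants_mlock(0));			/* 0: not locked */
	return 0;
}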
mm/mlock.c +11 −11
@@ -218,23 +218,23 @@ bool need_mlock_page_drain(int cpu)
 }
 
 /**
- * mlock_page - mlock a page already on (or temporarily off) LRU
- * @page: page to be mlocked, either a normal page or a THP head.
+ * mlock_folio - mlock a folio already on (or temporarily off) LRU
+ * @folio: folio to be mlocked.
  */
-void mlock_page(struct page *page)
+void mlock_folio(struct folio *folio)
 {
 	struct pagevec *pvec = &get_cpu_var(mlock_pvec);
 
-	if (!TestSetPageMlocked(page)) {
-		int nr_pages = thp_nr_pages(page);
+	if (!folio_test_set_mlocked(folio)) {
+		int nr_pages = folio_nr_pages(folio);
 
-		mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
+		zone_stat_mod_folio(folio, NR_MLOCK, nr_pages);
 		__count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
 	}
 
-	get_page(page);
-	if (!pagevec_add(pvec, mlock_lru(page)) ||
-	    PageHead(page) || lru_cache_disabled())
+	folio_get(folio);
+	if (!pagevec_add(pvec, mlock_lru(&folio->page)) ||
+	    folio_test_large(folio) || lru_cache_disabled())
 		mlock_pagevec(pvec);
 	put_cpu_var(mlock_pvec);
 }
@@ -296,7 +296,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
 			goto out;
 		page = pmd_page(*pmd);
 		if (vma->vm_flags & VM_LOCKED)
-			mlock_page(page);
+			mlock_folio(page_folio(page));
 		else
 			munlock_page(page);
 		goto out;
@@ -312,7 +312,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
 		if (PageTransCompound(page))
 			continue;
 		if (vma->vm_flags & VM_LOCKED)
-			mlock_page(page);
+			mlock_folio(page_folio(page));
 		else
 			munlock_page(page);
 	}
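
mlock_folio() above also shows this file's batching idiom: folios are
staged in a per-CPU pagevec, and the expensive LRU manipulation runs
only when the batch fills, the folio is large, or LRU caching is
disabled.  A rough userspace sketch of that accumulate-then-flush
shape, with hypothetical names standing in for the pagevec API
(pagevec_add() likewise returns the space remaining, so zero means
"full, flush now"):

#include <stdio.h>

#define BATCH_SIZE 15	/* PAGEVEC_SIZE in kernels of this era */

/* Hypothetical stand-in for the kernel's per-CPU struct pagevec. */
struct batch {
	int nr;
	int items[BATCH_SIZE];
};

/* The expensive step, paid once per batch instead of once per item. */
static void batch_flush(struct batch *b)
{
	printf("flushing %d item(s)\n", b->nr);
	b->nr = 0;
}

/* Mirrors pagevec_add(): returns the space left, 0 meaning "full". */
static int batch_add(struct batch *b, int item)
{
	b->items[b->nr++] = item;
	return BATCH_SIZE - b->nr;
}

int main(void)
{
	struct batch b = { .nr = 0 };

	for (int i = 0; i < 40; i++)
		if (!batch_add(&b, i))
			batch_flush(&b);
	if (b.nr)
		batch_flush(&b);	/* drain the remainder */
	return 0;
}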