Commit 96f97c43 authored by Lorenzo Stoakes, committed by Andrew Morton

mm: mlock: update the interface to use folios

Update the mlock interface to accept folios rather than pages, bringing
the interface in line with the internal implementation.

munlock_vma_page() still requires a page_folio() conversion; however, this
is consistent with the existing mlock_vma_page() implementation and is a
product of rmap still dealing in pages rather than folios.

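At call sites the change looks roughly like this; a minimal before/after
sketch, not taken verbatim from any one file:

	/* Before: the mlock interface dealt in pages. */
	mlock_new_page(page);
	munlock_page(page);
	mlock_page_drain_local();

	/* After: callers pass folios, and the drain helpers drop "page"
	 * from their names. A caller that still has only a page converts
	 * once at the boundary with page_folio(). */
	mlock_new_folio(page_folio(page));
	munlock_folio(page_folio(page));
	mlock_drain_local();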
Link: https://lkml.kernel.org/r/cba12777c5544305014bc0cbec56bb4cc71477d8.1673526881.git.lstoakes@gmail.com


Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Joel Fernandes (Google) <joel@joelfernandes.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent b213ef6b
mm/internal.h  +22 −16
@@ -533,10 +533,9 @@ extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of page_add_*_rmap(),
 * munlock at the end of page_remove_rmap(); but new anon
 * pages are managed by lru_cache_add_inactive_or_unevictable()
 * calling mlock_new_page().
 * mlock is usually called at the end of page_add_*_rmap(), munlock at
 * the end of page_remove_rmap(); but new anon folios are managed by
 * folio_add_lru_vma() calling mlock_new_folio().
 *
 * @compound is used to include pmd mappings of THPs, but filter out
 * pte mappings of THPs, which cannot be consistently counted: a pte
@@ -565,18 +564,25 @@ static inline void mlock_vma_page(struct page *page,
	mlock_vma_folio(page_folio(page), vma, compound);
}

void munlock_page(struct page *page);
static inline void munlock_vma_page(struct page *page,
void munlock_folio(struct folio *folio);

static inline void munlock_vma_folio(struct folio *folio,
			struct vm_area_struct *vma, bool compound)
{
	if (unlikely(vma->vm_flags & VM_LOCKED) &&
	    (compound || !PageTransCompound(page)))
		munlock_page(page);
	    (compound || !folio_test_large(folio)))
		munlock_folio(folio);
}

static inline void munlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound)
{
	munlock_vma_folio(page_folio(page), vma, compound);
}
void mlock_new_page(struct page *page);
bool need_mlock_page_drain(int cpu);
void mlock_page_drain_local(void);
void mlock_page_drain_remote(int cpu);
void mlock_new_folio(struct folio *folio);
bool need_mlock_drain(int cpu);
void mlock_drain_local(void);
void mlock_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

@@ -665,10 +671,10 @@ static inline void mlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound) { }
static inline void munlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound) { }
static inline void mlock_new_page(struct page *page) { }
static inline bool need_mlock_page_drain(int cpu) { return false; }
static inline void mlock_page_drain_local(void) { }
static inline void mlock_page_drain_remote(int cpu) { }
static inline void mlock_new_folio(struct folio *folio) { }
static inline bool need_mlock_drain(int cpu) { return false; }
static inline void mlock_drain_local(void) { }
static inline void mlock_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
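The !CONFIG_MMU stubs above mean callers of the renamed helpers need no
ifdefs; a sketch of a caller relying on that (example_cpu_dead() is a
hypothetical illustration, modelled on the page_alloc.c hunk below):

	/* Compiles on both configs: with CONFIG_MMU=n the stub
	 * need_mlock_drain() returns false and the calls are no-ops. */
	static int example_cpu_dead(unsigned int cpu)
	{
		if (need_mlock_drain(cpu))
			mlock_drain_remote(cpu);
		return 0;
	}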
mm/migrate.c  +1 −1
@@ -265,7 +265,7 @@ static bool remove_migration_pte(struct folio *folio,
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
		}
		if (vma->vm_flags & VM_LOCKED)
			mlock_page_drain_local();
			mlock_drain_local();

		trace_remove_migration_pte(pvmw.address, pte_val(pte),
					   compound_order(new));
mm/mlock.c  +18 −20
@@ -210,7 +210,7 @@ static void mlock_folio_batch(struct folio_batch *fbatch)
	folio_batch_reinit(fbatch);
}

void mlock_page_drain_local(void)
void mlock_drain_local(void)
{
	struct folio_batch *fbatch;

@@ -221,7 +221,7 @@ void mlock_page_drain_local(void)
	local_unlock(&mlock_fbatch.lock);
}

void mlock_page_drain_remote(int cpu)
void mlock_drain_remote(int cpu)
{
	struct folio_batch *fbatch;

@@ -231,7 +231,7 @@ void mlock_page_drain_remote(int cpu)
		mlock_folio_batch(fbatch);
}

bool need_mlock_page_drain(int cpu)
bool need_mlock_drain(int cpu)
{
	return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
}
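need_mlock_drain() is just a count check on that CPU's folio batch. The
queueing side follows the usual per-CPU batch pattern; a simplified sketch
of what mlock_folio()/munlock_folio() do, omitting the LRU-flag encoding
and the large-folio and lru_cache_disabled() special cases:

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
	/* folio_batch_add() returns the space remaining; 0 means full. */
	if (!folio_batch_add(fbatch, folio))
		mlock_folio_batch(fbatch);
	local_unlock(&mlock_fbatch.lock);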
@@ -262,13 +262,12 @@ void mlock_folio(struct folio *folio)
}

/**
 * mlock_new_page - mlock a newly allocated page not yet on LRU
 * @page: page to be mlocked, either a normal page or a THP head.
 * mlock_new_folio - mlock a newly allocated folio not yet on LRU
 * @folio: folio to be mlocked, either normal or a THP head.
 */
void mlock_new_page(struct page *page)
void mlock_new_folio(struct folio *folio)
{
	struct folio_batch *fbatch;
	struct folio *folio = page_folio(page);
	int nr_pages = folio_nr_pages(folio);

	local_lock(&mlock_fbatch.lock);
@@ -286,13 +285,12 @@ void mlock_new_page(struct page *page)
}

/**
 * munlock_page - munlock a page
 * @page: page to be munlocked, either a normal page or a THP head.
 * munlock_folio - munlock a folio
 * @folio: folio to be munlocked, either normal or a THP head.
 */
void munlock_page(struct page *page)
void munlock_folio(struct folio *folio)
{
	struct folio_batch *fbatch;
	struct folio *folio = page_folio(page);

	local_lock(&mlock_fbatch.lock);
	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
@@ -314,7 +312,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *start_pte, *pte;
	struct page *page;
	struct folio *folio;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
@@ -322,11 +320,11 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
			goto out;
		if (is_huge_zero_pmd(*pmd))
			goto out;
		page = pmd_page(*pmd);
		folio = page_folio(pmd_page(*pmd));
		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(page_folio(page));
			mlock_folio(folio);
		else
			munlock_page(page);
			munlock_folio(folio);
		goto out;
	}

@@ -334,15 +332,15 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
	for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page || is_zone_device_page(page))
		folio = vm_normal_folio(vma, addr, *pte);
		if (!folio || folio_is_zone_device(folio))
			continue;
		if (PageTransCompound(page))
		if (folio_test_large(folio))
			continue;
		if (vma->vm_flags & VM_LOCKED)
			mlock_folio(page_folio(page));
			mlock_folio(folio);
		else
			munlock_page(page);
			munlock_folio(folio);
	}
	pte_unmap(start_pte);
out:
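vm_normal_folio() is the folio analogue of vm_normal_page(), so the pte
loop above collapses a lookup plus conversion into one call; the old and
new forms are equivalent:

	/* Old: look up the page, convert to a folio on use. */
	page = vm_normal_page(vma, addr, *pte);
	if (page)
		mlock_folio(page_folio(page));

	/* New: vm_normal_folio() does the page_folio() conversion
	 * internally and returns NULL when there is no normal page. */
	folio = vm_normal_folio(vma, addr, *pte);
	if (folio)
		mlock_folio(folio);

Likewise folio_test_large(folio) replaces PageTransCompound(page): both
skip pte-mapped large pages, which cannot be counted consistently.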
mm/page_alloc.c  +1 −1
@@ -8587,7 +8587,7 @@ static int page_alloc_cpu_dead(unsigned int cpu)
	struct zone *zone;

	lru_add_drain_cpu(cpu);
	mlock_page_drain_remote(cpu);
	mlock_drain_remote(cpu);
	drain_pages(cpu);

	/*
mm/rmap.c  +2 −2
@@ -1764,7 +1764,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
		 */
		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
		if (vma->vm_flags & VM_LOCKED)
			mlock_page_drain_local();
			mlock_drain_local();
		folio_put(folio);
	}

@@ -2105,7 +2105,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
		 */
		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
		if (vma->vm_flags & VM_LOCKED)
			mlock_page_drain_local();
			mlock_drain_local();
		folio_put(folio);
	}

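The updated comment in mm/internal.h points at folio_add_lru_vma() as the
path by which new anon folios reach mlock_new_folio(); a sketch of that
caller (approximate shape of the mm/swap.c helper; treat the exact flag
test as an assumption):

	void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
	{
		if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
			mlock_new_folio(folio);	/* unevictable: keep off the LRU */
		else
			folio_add_lru(folio);
	}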