Commit 40baf156 authored by David Hildenbrand, committed by Tong Tiangen

mm: merge folio_is_secretmem() and folio_fast_pin_allowed() into gup_fast_folio_allowed()

mainline inclusion
from mainline-v6.10-rc1
commit f002882ca369aba3eece5006f3346ccf75ede7c5
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IBJ8HQ

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=f002882ca369aba3eece5006f3346ccf75ede7c5

--------------------------------

folio_is_secretmem() is currently only used during GUP-fast.  Nowadays,
folio_fast_pin_allowed() performs similar checks during GUP-fast and
contains a lot of careful handling -- READ_ONCE() -- , sanity checks --
lockdep_assert_irqs_disabled() -- and helpful comments on how this
handling is safe and correct.

So let's merge folio_is_secretmem() into folio_fast_pin_allowed().  Rename
folio_fast_pin_allowed() to gup_fast_folio_allowed(), to better match the
new semantics.

Link: https://lkml.kernel.org/r/20240326143210.291116-4-david@redhat.com


Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Miklos Szeredi <mszeredi@redhat.com>
Cc: xingwei lee <xrivendell7@gmail.com>
Cc: yue sun <samsun1006219@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
parent 30472c94
+2 −19
Original line number Diff line number Diff line
@@ -6,25 +6,8 @@

extern const struct address_space_operations secretmem_aops;

static inline bool folio_is_secretmem(struct folio *folio)
static inline bool secretmem_mapping(struct address_space *mapping)
{
	struct address_space *mapping;

	/*
	 * Using folio_mapping() is quite slow because of the actual call
	 * instruction.
	 * We know that secretmem pages are not compound, so we can
	 * save a couple of cycles here.
	 */
	if (folio_test_large(folio))
		return false;

	mapping = (struct address_space *)
		((unsigned long)folio->mapping & ~PAGE_MAPPING_FLAGS);

	if (!mapping || mapping != folio->mapping)
		return false;

	return mapping->a_ops == &secretmem_aops;
}

@@ -38,7 +21,7 @@ static inline bool vma_is_secretmem(struct vm_area_struct *vma)
	return false;
}

static inline bool folio_is_secretmem(struct folio *folio)
static inline bool secretmem_mapping(struct address_space *mapping)
{
	return false;
}
+28 −20
Original line number Diff line number Diff line
@@ -2367,12 +2367,14 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
#ifdef CONFIG_HAVE_FAST_GUP

/*
 * Used in the GUP-fast path to determine whether a pin is permitted for a
 * specific folio.
 * Used in the GUP-fast path to determine whether GUP is permitted to work on
 * a specific folio.
 *
 * This call assumes the caller has pinned the folio, that the lowest page table
 * level still points to this folio, and that interrupts have been disabled.
 *
 * GUP-fast must reject all secretmem folios.
 *
 * Writing to pinned file-backed dirty tracked folios is inherently problematic
 * (see comment describing the writable_file_mapping_allowed() function). We
 * therefore try to avoid the most egregious case of a long-term mapping doing
@@ -2382,25 +2384,34 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
 * in the fast path, so instead we whitelist known good cases and if in doubt,
 * fall back to the slow path.
 */
static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags)
static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags)
{
	bool reject_file_backed = false;
	struct address_space *mapping;
	bool check_secretmem = false;
	unsigned long mapping_flags;

	/*
	 * If we aren't pinning then no problematic write can occur. A long term
	 * pin is the most egregious case so this is the one we disallow.
	 */
	if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) !=
	if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) ==
	    (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE))
		return true;
		reject_file_backed = true;

	/* We hold a folio reference, so we can safely access folio fields. */

	/* The folio is pinned, so we can safely access folio fields. */
	/* secretmem folios are always order-0 folios. */
	if (IS_ENABLED(CONFIG_SECRETMEM) && !folio_test_large(folio))
		check_secretmem = true;

	if (!reject_file_backed && !check_secretmem)
		return true;

	if (WARN_ON_ONCE(folio_test_slab(folio)))
		return false;

	/* hugetlb mappings do not require dirty-tracking. */
	/* hugetlb neither requires dirty-tracking nor can be secretmem. */
	if (folio_test_hugetlb(folio))
		return true;

@@ -2436,10 +2447,12 @@ static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags)

	/*
	 * At this point, we know the mapping is non-null and points to an
	 * address_space object. The only remaining whitelisted file system is
	 * shmem.
	 * address_space object.
	 */
	return shmem_mapping(mapping);
	if (check_secretmem && secretmem_mapping(mapping))
		return false;
	/* The only remaining allowed file system is shmem. */
	return !reject_file_backed || shmem_mapping(mapping);
}

static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
@@ -2621,18 +2634,13 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
		if (!folio)
			goto pte_unmap;

		if (unlikely(folio_is_secretmem(folio))) {
			gup_put_folio(folio, 1, flags);
			goto pte_unmap;
		}

		if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
		    unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) {
			gup_put_folio(folio, 1, flags);
			goto pte_unmap;
		}

		if (!folio_fast_pin_allowed(folio, flags)) {
		if (!gup_fast_folio_allowed(folio, flags)) {
			gup_put_folio(folio, 1, flags);
			goto pte_unmap;
		}
@@ -2829,7 +2837,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		return 0;
	}

	if (!folio_fast_pin_allowed(folio, flags)) {
	if (!gup_fast_folio_allowed(folio, flags)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}
@@ -2900,7 +2908,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		return 0;
	}

	if (!folio_fast_pin_allowed(folio, flags)) {
	if (!gup_fast_folio_allowed(folio, flags)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}
@@ -2944,7 +2952,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		return 0;
	}

	if (!folio_fast_pin_allowed(folio, flags)) {
	if (!gup_fast_folio_allowed(folio, flags)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}
@@ -2989,7 +2997,7 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
		return 0;
	}

	if (!folio_fast_pin_allowed(folio, flags)) {
	if (!gup_fast_folio_allowed(folio, flags)) {
		gup_put_folio(folio, refs, flags);
		return 0;
	}