Commit 2798bbe7 authored by Hugh Dickins, committed by Andrew Morton

mm/page_vma_mapped: pte_offset_map_nolock() not pte_lockptr()

map_pte() use pte_offset_map_nolock(), to make sure of the ptl belonging
to pte, even if pmd entry is then changed racily: page_vma_mapped_walk()
use that instead of getting pte_lockptr() later, or restart if map_pte()
found no page table.

Link: https://lkml.kernel.org/r/cba186e0-5ed7-e81b-6cd8-dade4c33c248@google.com


Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Song Liu <song@kernel.org>
Cc: Steven Price <steven.price@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Zack Rusin <zackr@vmware.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 90f43b0a
mm/page_vma_mapped.c: 22 additions, 6 deletions
@@ -13,16 +13,28 @@ static inline bool not_found(struct page_vma_mapped_walk *pvmw)
 	return false;
 }
 
-static bool map_pte(struct page_vma_mapped_walk *pvmw)
+static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp)
 {
 	if (pvmw->flags & PVMW_SYNC) {
 		/* Use the stricter lookup */
 		pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
 						pvmw->address, &pvmw->ptl);
-		return true;
+		*ptlp = pvmw->ptl;
+		return !!pvmw->pte;
 	}
 
-	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
+	/*
+	 * It is important to return the ptl corresponding to pte,
+	 * in case *pvmw->pmd changes underneath us; so we need to
+	 * return it even when choosing not to lock, in case caller
+	 * proceeds to loop over next ptes, and finds a match later.
+	 * Though, in most cases, page lock already protects this.
+	 */
+	pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
+					  pvmw->address, ptlp);
+	if (!pvmw->pte)
+		return false;
+
 	if (pvmw->flags & PVMW_MIGRATION) {
 		if (!is_swap_pte(*pvmw->pte))
 			return false;
@@ -51,7 +63,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw)
 	} else if (!pte_present(*pvmw->pte)) {
 		return false;
 	}
-	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
+	pvmw->ptl = *ptlp;
 	spin_lock(pvmw->ptl);
 	return true;
 }
@@ -156,6 +168,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	struct vm_area_struct *vma = pvmw->vma;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long end;
+	spinlock_t *ptl;
 	pgd_t *pgd;
 	p4d_t *p4d;
 	pud_t *pud;
@@ -257,8 +270,11 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 			step_forward(pvmw, PMD_SIZE);
 			continue;
 		}
-		if (!map_pte(pvmw))
-			goto next_pte;
+		if (!map_pte(pvmw, &ptl)) {
+			if (!pvmw->pte)
+				goto restart;
+			goto next_pte;
+		}
 this_pte:
 		if (check_pte(pvmw))
 			return true;
@@ -281,7 +297,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 		} while (pte_none(*pvmw->pte));
 
 		if (!pvmw->ptl) {
-			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
+			pvmw->ptl = ptl;
 			spin_lock(pvmw->ptl);
 		}
 		goto this_pte;