Commit 5ef3abe9 authored by Baolin Wang, committed by Zheng Zengkai

mm/hugetlb: fix races when looking up a CONT-PTE/PMD size hugetlb page

mainline inclusion
from mainline-v6.1-rc1
commit fac35ba7
category: bugfix
bugzilla: 187864, https://gitee.com/src-openeuler/kernel/issues/I5X1Z9
CVE: CVE-2022-3623

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/commit/?id=fac35ba763ed07ba93154c95ffc0c4a55023707f

--------------------------------

On some architectures (like ARM64), CONT-PTE/PMD size hugetlb is
supported, which means not only PMD/PUD size hugetlb pages (2M and 1G)
are available, but also CONT-PTE/PMD sizes (64K and 32M) if a 4K base
page size is specified.
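
For reference, the arithmetic behind those sizes on ARM64 with a 4K base
page is roughly the following (illustrative constants, mirroring the
arch's CONT_PTES/CONT_PMDS definitions for the 4K granule):

/*
 * A "contiguous" hugetlb page is built from 16 adjacent page table
 * entries that share the contiguous hint bit (4K granule case).
 */
#define CONT_PTES	16
#define CONT_PMDS	16
#define CONT_PTE_SIZE	(CONT_PTES * SZ_4K)	/* 16 * 4K = 64K */
#define CONT_PMD_SIZE	(CONT_PMDS * SZ_2M)	/* 16 * 2M = 32M */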

So when looking up a CONT-PTE size hugetlb page by follow_page(), it will
use pte_offset_map_lock() to get the pte entry lock for the CONT-PTE size
hugetlb in follow_page_pte(). However, this pte entry lock is incorrect
for the CONT-PTE size hugetlb: we should use huge_pte_lock() to get
the correct lock, which is mm->page_table_lock.
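
The reason is visible in the lock selection helper; simplified from
include/linux/hugetlb.h of this era, it looks roughly like:

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	/* Only an exactly PMD-sized hugetlb page uses the split pmd lock. */
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *)pte);
	/*
	 * Everything else (64K CONT-PTE, 32M CONT-PMD, 1G PUD) must
	 * serialize on mm->page_table_lock, which pte_offset_map_lock()
	 * does not take.
	 */
	return &mm->page_table_lock;
}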

That means the pte entry of the CONT-PTE size hugetlb under the current
pte lock is unstable in follow_page_pte(): another thread can still
migrate or poison the pte entry of the CONT-PTE size hugetlb, which can
cause potential race issues even under the 'pte lock'.

For example, suppose thread A is trying to look up a CONT-PTE size
hugetlb page by the move_pages() syscall under the lock, while another
thread B migrates the CONT-PTE hugetlb page at the same time. Thread A
then gets an incorrect page, and if thread A also wants to do page
migration, a data inconsistency error occurs.
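
A hypothetical reproducer sketch for this window (not taken from the
original report; MAP_HUGE_64KB assumes an ARM64 kernel with 64K CONT-PTE
hugetlb configured) would have two threads racing move_pages() on one
CONT-PTE page:

#include <numaif.h>		/* move_pages(), link with -lnuma */
#include <pthread.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_HUGE_64KB
#define MAP_HUGE_64KB	(16 << 26)	/* 26 == MAP_HUGE_SHIFT */
#endif

static void *addr;

static void *mover(void *arg)
{
	void *pages[1] = { addr };
	int nodes[1] = { (int)(long)arg };
	int status[1];

	/*
	 * Both threads look up and try to migrate the same CONT-PTE
	 * hugetlb page; without huge_pte_lock() in follow_page_pte(),
	 * one lookup can observe the entry mid-migration.
	 */
	move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	/* 64K CONT-PTE hugetlb mapping on ARM64 with 4K base pages. */
	addr = mmap(NULL, 64 * 1024, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_64KB,
		    -1, 0);
	if (addr == MAP_FAILED)
		return 1;
	memset(addr, 0x5a, 64 * 1024);	/* fault the hugetlb page in */

	pthread_create(&t1, NULL, mover, (void *)0L);
	pthread_create(&t2, NULL, mover, (void *)1L);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}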

Moreover, the same issue exists for CONT-PMD size hugetlb in
follow_huge_pmd().

To fix the above issues, rename follow_huge_pmd() to follow_huge_pmd_pte()
so that it handles both PMD- and PTE-level size hugetlb, and make it use
huge_pte_lock() to get the correct pte entry lock so that the pte entry
stays stable.

Mike said:

Support for CONT_PMD/_PTE was added with bb9dd3df ("arm64: hugetlb:
refactor find_num_contig()").  Patch series "Support for contiguous pte
hugepages", v4.  However, I do not believe these code paths were
executed until migration support was added with 5480280d ("arm64/mm:
enable HugeTLB migration for contiguous bit HugeTLB pages"). I would go
with 5480280d for the Fixes: target.

Link: https://lkml.kernel.org/r/635f43bdd85ac2615a58405da82b4d33c6e5eb05.1662017562.git.baolin.wang@linux.alibaba.com


Fixes: 5480280d ("arm64/mm: enable HugeTLB migration for contiguous bit HugeTLB pages")
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Suggested-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Conflicts:
	mm/hugetlb.c
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parent 10403e5b
include/linux/hugetlb.h +4 −4
@@ -196,8 +196,8 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 struct page *follow_huge_pd(struct vm_area_struct *vma,
 			    unsigned long address, hugepd_t hpd,
 			    int flags, int pdshift);
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-				pmd_t *pmd, int flags);
+struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
+				 int flags);
 struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
 				pud_t *pud, int flags);
 struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
@@ -283,8 +283,8 @@ static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
 	return NULL;
 }
 
-static inline struct page *follow_huge_pmd(struct mm_struct *mm,
-				unsigned long address, pmd_t *pmd, int flags)
+static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
+				unsigned long address, int flags)
 {
 	return NULL;
 }
mm/gup.c +13 −1
@@ -486,6 +486,18 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
 			 (FOLL_PIN | FOLL_GET)))
 		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Considering PTE level hugetlb, like continuous-PTE hugetlb on
+	 * ARM64 architecture.
+	 */
+	if (is_vm_hugetlb_page(vma)) {
+		page = follow_huge_pmd_pte(vma, address, flags);
+		if (page)
+			return page;
+		return no_page_table(vma, flags);
+	}
+
 retry:
 	if (unlikely(pmd_bad(*pmd)))
 		return no_page_table(vma, flags);
@@ -641,7 +653,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 	if (pmd_none(pmdval))
 		return no_page_table(vma, flags);
 	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
-		page = follow_huge_pmd(mm, address, pmd, flags);
+		page = follow_huge_pmd_pte(vma, address, flags);
 		if (page)
 			return page;
 		return no_page_table(vma, flags);
mm/hugetlb.c +13 −14
@@ -6008,12 +6008,13 @@ follow_huge_pd(struct vm_area_struct *vma,
 }
 
 struct page * __weak
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-		pmd_t *pmd, int flags)
+follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags)
 {
+	struct hstate *h = hstate_vma(vma);
+	struct mm_struct *mm = vma->vm_mm;
 	struct page *page = NULL;
 	spinlock_t *ptl;
-	pte_t pte;
+	pte_t *ptep, pte;
 
 	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
 	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
@@ -6021,17 +6022,15 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 		return NULL;
 
 retry:
-	ptl = pmd_lockptr(mm, pmd);
-	spin_lock(ptl);
-	/*
-	 * make sure that the address range covered by this pmd is not
-	 * unmapped from other threads.
-	 */
-	if (!pmd_huge(*pmd))
-		goto out;
-	pte = huge_ptep_get((pte_t *)pmd);
+	ptep = huge_pte_offset(mm, address, huge_page_size(h));
+	if (!ptep)
+		return NULL;
+
+	ptl = huge_pte_lock(h, mm, ptep);
+	pte = huge_ptep_get(ptep);
 	if (pte_present(pte)) {
-		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
+		page = pte_page(pte) +
+			((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
 		/*
 		 * try_grab_page() should always succeed here, because: a) we
 		 * hold the pmd (ptl) lock, and b) we've just checked that the
@@ -6047,7 +6046,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 	} else {
 		if (is_hugetlb_entry_migration(pte)) {
 			spin_unlock(ptl);
-			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
+			__migration_entry_wait(mm, ptep, ptl);
 			goto retry;
 		}
 		/*