Commit 04ac3249 authored by Liu Shixin

mm: hugetlb: independent PMD page table shared count

mainline inclusion
from mainline-v6.13-rc6
commit 59d9094df3d79443937add8700b2ef1a866b1081
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IBBGL0
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=59d9094df3d79443937add8700b2ef1a866b1081

--------------------------------

The folio refcount may be increased unexpectedly through try_get_folio() by
callers such as split_huge_pages.  In huge_pmd_unshare(), we use the refcount
to check whether a PMD page table is shared.  The check is incorrect if the
refcount is increased by the above callers, and this can cause the page
table to be leaked:

 BUG: Bad page state in process sh  pfn:109324
 page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x66 pfn:0x109324
 flags: 0x17ffff800000000(node=0|zone=2|lastcpupid=0xfffff)
 page_type: f2(table)
 raw: 017ffff800000000 0000000000000000 0000000000000000 0000000000000000
 raw: 0000000000000066 0000000000000000 00000000f2000000 0000000000000000
 page dumped because: nonzero mapcount
 ...
 CPU: 31 UID: 0 PID: 7515 Comm: sh Kdump: loaded Tainted: G    B              6.13.0-rc2master+ #7
 Tainted: [B]=BAD_PAGE
 Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015
 Call trace:
  show_stack+0x20/0x38 (C)
  dump_stack_lvl+0x80/0xf8
  dump_stack+0x18/0x28
  bad_page+0x8c/0x130
  free_page_is_bad_report+0xa4/0xb0
  free_unref_page+0x3cc/0x620
  __folio_put+0xf4/0x158
  split_huge_pages_all+0x1e0/0x3e8
  split_huge_pages_write+0x25c/0x2d8
  full_proxy_write+0x64/0xd8
  vfs_write+0xcc/0x280
  ksys_write+0x70/0x110
  __arm64_sys_write+0x24/0x38
  invoke_syscall+0x50/0x120
  el0_svc_common.constprop.0+0xc8/0xf0
  do_el0_svc+0x24/0x38
  el0_svc+0x34/0x128
  el0t_64_sync_handler+0xc8/0xd0
  el0t_64_sync+0x190/0x198

The issue may be triggered by damon, offline_page, page_idle, etc., which
will increase the refcount of the page table.

1. The page table itself will be discarded after reporting the
   "nonzero mapcount".

2. The HugeTLB page mapped by the page table is not freed, since we
   treat the page table as shared and a shared page table will not be
   unmapped.
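
For reference, the check that goes wrong is the pre-patch logic in
huge_pmd_unshare(), which infers "shared" from the raw refcount of the PMD
page table page.  A paraphrased sketch of the pre-patch path (the
corresponding removals are in the mm/hugetlb.c hunk below):

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;	/* believed to be unshared */

	/*
	 * Reached whenever page_count() > 1, even if the extra reference
	 * is only a transient one taken via try_get_folio(): the PUD is
	 * cleared and a reference is dropped, the hugetlb pages mapped by
	 * this table are never unmapped, and the table page is later
	 * freed with a nonzero mapcount.
	 */
	pud_clear(pud);
	put_page(virt_to_page(ptep));
	mm_dec_nr_pmds(mm);
	return 1;	/* caller skips per-page unmapping */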

Fix it by introducing an independent shared count for PMD page tables.  As
described by the comment, pt_index/pt_mm/pt_frag_refcount are only used for
s390 gmap, x86 pgds and powerpc, while huge PMD sharing is used by
x86/arm64/riscv pmds, so we can reuse that union field as pt_share_count.
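
A condensed sketch of how the new counter replaces the page refcount in the
share/unshare paths (this mirrors the mm/hugetlb.c hunks below; the
page_pmd_pts_*() helpers are the accessors this patch adds to
include/linux/mm_types.h):

	/* huge_pmd_share(): another mm starts sharing an existing PMD table */
	page_pmd_pts_inc(virt_to_page(spte));		/* was get_page() */

	/*
	 * huge_pmd_unshare(): only a nonzero share count means "shared",
	 * so unrelated refcount bumps (damon, page_idle, split_huge_pages,
	 * ...) no longer fool the check.
	 */
	if (!page_pmd_pts_count(virt_to_page(ptep)))
		return 0;
	pud_clear(pud);
	page_pmd_pts_dec(virt_to_page(ptep));		/* was put_page() */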

Link: https://lkml.kernel.org/r/20241216071147.3984217-1-liushixin2@huawei.com

Fixes: 39dde65c ("[PATCH] shared page table for hugetlb page")
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Ken Chen <kenneth.w.chen@intel.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Conflicts:
	include/linux/hugetlb.h
	include/linux/mm.h
	include/linux/mm_types.h
	mm/hugetlb.c
	mm/rmap.c
[ Context conflict with commit ea919671517a and 38ca8a185389.
  Config conflict with commit 188cac58a8bc.
  Add missing vma in huge_pmd_unshare() from commit 34ae204f. ]
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
parent 130de291
include/linux/hugetlb.h (+4 −3)
@@ -138,7 +138,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 			unsigned long addr, unsigned long sz);
 pte_t *huge_pte_offset(struct mm_struct *mm,
 		       unsigned long addr, unsigned long sz);
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+				unsigned long *addr, pte_t *ptep);
 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
 				unsigned long *start, unsigned long *end);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
@@ -171,8 +172,8 @@ static inline unsigned long hugetlb_total_pages(void)
 	return 0;
 }
 
-static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
-					pte_t *ptep)
+static inline int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+					unsigned long *addr, pte_t *ptep)
 {
 	return 0;
 }
include/linux/mm.h (+1 −0)
@@ -2104,6 +2104,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	page->pmd_huge_pte = NULL;
 #endif
+	page_pmd_pts_init(page);
 	return ptlock_init(page);
 }

include/linux/mm_types.h (+29 −0)
@@ -145,6 +145,9 @@ struct page {
 			union {
 				struct mm_struct *pt_mm; /* x86 pgds only */
 				atomic_t pt_frag_refcount; /* powerpc */
+#if defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && !defined(__GENKSYMS__)
+				atomic_t pt_share_count;
+#endif
 			};
 #if ALLOC_SPLIT_PTLOCKS
 			spinlock_t *ptl;
@@ -209,6 +212,32 @@ struct page {
 #endif
 } _struct_page_alignment;
 
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+static inline void page_pmd_pts_init(struct page *page)
+{
+	atomic_set(&page->pt_share_count, 0);
+}
+
+static inline void page_pmd_pts_inc(struct page *page)
+{
+	atomic_inc(&page->pt_share_count);
+}
+
+static inline void page_pmd_pts_dec(struct page *page)
+{
+	atomic_dec(&page->pt_share_count);
+}
+
+static inline int page_pmd_pts_count(struct page *page)
+{
+	return atomic_read(&page->pt_share_count);
+}
+#else
+static inline void page_pmd_pts_init(struct page *page)
+{
+}
+#endif
+
 #define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
 #define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

mm/hugetlb.c (+13 −13)
@@ -5054,7 +5054,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			continue;
 
 		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, &address, ptep)) {
+		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
 			spin_unlock(ptl);
 			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
 			force_flush = true;
@@ -6126,7 +6126,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 		if (!ptep)
 			continue;
 		ptl = huge_pte_lock(h, mm, ptep);
-		if (huge_pmd_unshare(mm, &address, ptep)) {
+		if (huge_pmd_unshare(mm, vma, &address, ptep)) {
 			pages++;
 			spin_unlock(ptl);
 			shared_pmd = true;
@@ -6444,7 +6444,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 			spte = huge_pte_offset(svma->vm_mm, saddr,
 					       vma_mmu_pagesize(svma));
 			if (spte) {
-				get_page(virt_to_page(spte));
+				page_pmd_pts_inc(virt_to_page(spte));
 				break;
 			}
 		}
@@ -6459,7 +6459,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
 		mm_inc_nr_pmds(mm);
 	} else {
-		put_page(virt_to_page(spte));
+		page_pmd_pts_dec(virt_to_page(spte));
 	}
 	spin_unlock(ptl);
 out:
@@ -6471,27 +6471,26 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 /*
  * unmap huge page backed by shared pte.
  *
- * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
- * indicated by page_count > 1, unmap is achieved by clearing pud and
- * decrementing the ref count. If count == 1, the pte page is not shared.
- *
  * called with page table lock held.
  *
  * returns: 1 successfully unmapped a shared pte page
  *	    0 the underlying pte page is not shared, or it is the last user
  */
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+		     unsigned long *addr, pte_t *ptep)
 {
+	unsigned long sz = huge_page_size(hstate_vma(vma));
 	pgd_t *pgd = pgd_offset(mm, *addr);
 	p4d_t *p4d = p4d_offset(pgd, *addr);
 	pud_t *pud = pud_offset(p4d, *addr);
 
-	BUG_ON(page_count(virt_to_page(ptep)) == 0);
-	if (page_count(virt_to_page(ptep)) == 1)
+	if (sz != PMD_SIZE)
+		return 0;
+	if (!page_pmd_pts_count(virt_to_page(ptep)))
 		return 0;
 
 	pud_clear(pud);
-	put_page(virt_to_page(ptep));
+	page_pmd_pts_dec(virt_to_page(ptep));
 	mm_dec_nr_pmds(mm);
 	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
 	return 1;
@@ -6503,7 +6502,8 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 	return NULL;
 }
 
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+				unsigned long *addr, pte_t *ptep)
 {
 	return 0;
 }
mm/rmap.c (+1 −1)
@@ -1449,7 +1449,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		address = pvmw.address;
 
 		if (PageHuge(page)) {
-			if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
+			if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
 				/*
 				 * huge_pmd_unshare unmapped an entire PMD
 				 * page.  There is no way of knowing exactly