Commit a1c6606b authored by David Hildenbrand, committed by Wen Zhiwei
Browse files

mm/hugetlb: enforce that PMD PT sharing has split PMD PT locks

stable inclusion
from stable-v6.6.72
commit ec500230d39a36e91c5aadbf4e73ff909a233446
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IBQN9L

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=ec500230d39a36e91c5aadbf4e73ff909a233446

--------------------------------

[ Upstream commit 188cac58a8bcdf82c7f63275b68f7a46871e45d6 ]

Sharing page tables between processes but falling back to per-MM page
table locks cannot possibly work.

So, let's make sure that we do have split PMD locks by adding a new
Kconfig option and letting that depend on CONFIG_SPLIT_PMD_PTLOCKS.

Link: https://lkml.kernel.org/r/20240726150728.3159964-3-david@redhat.com


Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: "Naveen N. Rao" <naveen.n.rao@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Stable-dep-of: 59d9094df3d7 ("mm: hugetlb: independent PMD page table shared count")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Wen Zhiwei <wenzhiwei@kylinos.cn>
parent 9a1c0480
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -310,6 +310,10 @@ config HUGETLB_ALLOC_LIMIT

	  If unsure, say N.

config HUGETLB_PMD_PAGE_TABLE_SHARING
	def_bool HUGETLB_PAGE
	depends on ARCH_WANT_HUGE_PMD_SHARE && SPLIT_PMD_PTLOCKS

config ARCH_HAS_GIGANTIC_PAGE
	bool

+2 −3
Original line number Diff line number Diff line
@@ -1308,7 +1308,7 @@ static inline __init void hugetlb_cma_reserve(int order)
}
#endif

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return page_count(virt_to_page(pte)) > 1;
@@ -1344,8 +1344,7 @@ bool __vma_private_lock(struct vm_area_struct *vma);
static inline pte_t *
hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
{
#if defined(CONFIG_HUGETLB_PAGE) && \
	defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
#if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP)
	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

	/*
+4 −4
Original line number Diff line number Diff line
@@ -7127,7 +7127,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
	return 0;
}

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
@@ -7289,7 +7289,7 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
	return 1;
}

#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
#else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud)
@@ -7312,7 +7312,7 @@ bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -7410,7 +7410,7 @@ unsigned long hugetlb_mask_last_page(struct hstate *h)
/* See description above.  Architectures can provide their own version. */
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
{
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
	if (huge_page_size(h) == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
#endif