Commit 5b78ed24 authored by Luigi Rizzo's avatar Luigi Rizzo Committed by Linus Torvalds
Browse files

mm/pagemap: add mmap_assert_locked() annotations to find_vma*()

find_vma() and variants need protection when used.  This patch adds
mmap_assert_locked() calls in the functions.

To make sure the invariant is satisfied, we also need to add a
mmap_read_lock() around the get_user_pages_remote() call in
get_arg_page().  The lock is not strictly necessary because the mm has
been newly created, but the extra cost is limited because the same mutex
was also acquired shortly before in __bprm_mm_init(), so it is hot and
uncontended.

[penguin-kernel@i-love.sakura.ne.jp: TOMOYO needs the same protection which get_arg_page() needs]
  Link: https://lkml.kernel.org/r/58bb6bf7-a57e-8a40-e74b-39584b415152@i-love.sakura.ne.jp

Link: https://lkml.kernel.org/r/20210731175341.3458608-1-lrizzo@google.com


Signed-off-by: default avatarLuigi Rizzo <lrizzo@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent e15710bf
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -217,8 +217,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
	 * We are doing an exec().  'current' is the process
	 * doing the exec and bprm->mm is the new process's mm.
	 */
	mmap_read_lock(bprm->mm);
	ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags,
			&page, NULL, NULL);
	mmap_read_unlock(bprm->mm);
	if (ret <= 0)
		return NULL;

+2 −0
Original line number Diff line number Diff line
@@ -534,6 +534,7 @@ static int find_vma_links(struct mm_struct *mm, unsigned long addr,
{
	struct rb_node **__rb_link, *__rb_parent, *rb_prev;

	mmap_assert_locked(mm);
	__rb_link = &mm->mm_rb.rb_node;
	rb_prev = __rb_parent = NULL;

@@ -2303,6 +2304,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
	struct rb_node *rb_node;
	struct vm_area_struct *vma;

	mmap_assert_locked(mm);
	/* Check the cache first. */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
+9 −4
Original line number Diff line number Diff line
@@ -897,6 +897,9 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
		      struct tomoyo_page_dump *dump)
{
	struct page *page;
#ifdef CONFIG_MMU
	int ret;
#endif

	/* dump->data is released by tomoyo_find_next_domain(). */
	if (!dump->data) {
@@ -909,11 +912,13 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
	/*
	 * This is called at execve() time in order to dig around
	 * in the argv/environment of the new proceess
	 * (represented by bprm).  'current' is the process doing
	 * the execve().
	 * (represented by bprm).
	 */
	if (get_user_pages_remote(bprm->mm, pos, 1,
				FOLL_FORCE, &page, NULL, NULL) <= 0)
	mmap_read_lock(bprm->mm);
	ret = get_user_pages_remote(bprm->mm, pos, 1,
				    FOLL_FORCE, &page, NULL, NULL);
	mmap_read_unlock(bprm->mm);
	if (ret <= 0)
		return false;
#else
	page = bprm->page[pos / PAGE_SIZE];