Commit 29a22b9e authored by Suren Baghdasaryan, committed by Andrew Morton

mm: handle userfaults under VMA lock

Enable handle_userfault to operate under VMA lock by releasing the VMA
lock instead of mmap_lock and retrying.  Note that FAULT_FLAG_RETRY_NOWAIT
should never be used when handling faults under per-VMA lock protection
because that would break the assumption that the lock is dropped on
retry.

[surenb@google.com: fix a lockdep issue in vma_assert_write_locked]
  Link: https://lkml.kernel.org/r/20230712195652.969194-1-surenb@google.com
Link: https://lkml.kernel.org/r/20230630211957.1341547-7-surenb@google.com

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Acked-by: Peter Xu <peterx@redhat.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Hillf Danton <hdanton@sina.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Michel Lespinasse <michel@lespinasse.org>
Cc: Minchan Kim <minchan@google.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 1235ccd0
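The core of the change is the fault-time locking contract: with FAULT_FLAG_VMA_LOCK set, only the per-VMA lock is held, so returning VM_FAULT_RETRY must be preceded by dropping that lock, and FAULT_FLAG_RETRY_NOWAIT is ruled out because it returns without dropping anything. A minimal sketch of that invariant as a hypothetical guard (check_vma_lock_fault_flags is invented for illustration; the patch itself relies on the release_fault_lock()/assert_fault_locked() helpers added below):

	/* Hypothetical guard, not part of the patch; illustrates the contract. */
	static void check_vma_lock_fault_flags(struct vm_fault *vmf)
	{
		/*
		 * Under per-VMA lock protection, VM_FAULT_RETRY implies the
		 * lock was dropped; NOWAIT would return with it still held.
		 */
		VM_WARN_ON_ONCE((vmf->flags & FAULT_FLAG_VMA_LOCK) &&
				(vmf->flags & FAULT_FLAG_RETRY_NOWAIT));
	}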
fs/userfaultfd.c  +14 −20
@@ -277,17 +277,16 @@ static inline struct uffd_msg userfault_msg(unsigned long address,
  * hugepmd ranges.
  */
 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
-					 struct vm_area_struct *vma,
-					 unsigned long address,
-					 unsigned long flags,
-					 unsigned long reason)
+					      struct vm_fault *vmf,
+					      unsigned long reason)
 {
+	struct vm_area_struct *vma = vmf->vma;
 	pte_t *ptep, pte;
 	bool ret = true;

-	mmap_assert_locked(ctx->mm);
+	assert_fault_locked(vmf);

-	ptep = hugetlb_walk(vma, address, vma_mmu_pagesize(vma));
+	ptep = hugetlb_walk(vma, vmf->address, vma_mmu_pagesize(vma));
 	if (!ptep)
 		goto out;
@@ -308,9 +307,7 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
 }
 #else
 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
-					 struct vm_area_struct *vma,
-					 unsigned long address,
-					 unsigned long flags,
-					 unsigned long reason)
+					      struct vm_fault *vmf,
+					      unsigned long reason)
 {
 	return false;	/* should never get here */
@@ -325,11 +322,11 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
  * threads.
  */
 static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
-					 unsigned long address,
-					 unsigned long flags,
+					 struct vm_fault *vmf,
 					 unsigned long reason)
 {
 	struct mm_struct *mm = ctx->mm;
+	unsigned long address = vmf->address;
 	pgd_t *pgd;
 	p4d_t *p4d;
 	pud_t *pud;
@@ -338,7 +335,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
 	pte_t ptent;
 	bool ret = true;

-	mmap_assert_locked(mm);
+	assert_fault_locked(vmf);

 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
@@ -440,7 +437,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	 * Coredumping runs without mmap_lock so we can only check that
 	 * the mmap_lock is held, if PF_DUMPCORE was not set.
 	 */
-	mmap_assert_locked(mm);
+	assert_fault_locked(vmf);

 	ctx = vma->vm_userfaultfd_ctx.ctx;
 	if (!ctx)
@@ -556,15 +553,12 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
 	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

 	if (!is_vm_hugetlb_page(vma))
-		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
-						  reason);
+		must_wait = userfaultfd_must_wait(ctx, vmf, reason);
 	else
-		must_wait = userfaultfd_huge_must_wait(ctx, vma,
-						       vmf->address,
-						       vmf->flags, reason);
+		must_wait = userfaultfd_huge_must_wait(ctx, vmf, reason);
 	if (is_vm_hugetlb_page(vma))
 		hugetlb_vma_unlock_read(vma);
-	mmap_read_unlock(mm);
+	release_fault_lock(vmf);

 	if (likely(must_wait && !READ_ONCE(ctx->released))) {
 		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
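Threading vm_fault through lets both must-wait helpers read the address and flags from vmf and assert whichever lock the fault actually holds. A hypothetical caller-side wrapper showing the simplified dispatch (uffd_check_must_wait is invented for illustration):

	/* Hypothetical wrapper mirroring the call sites in handle_userfault(). */
	static bool uffd_check_must_wait(struct userfaultfd_ctx *ctx,
					 struct vm_fault *vmf, unsigned long reason)
	{
		if (!is_vm_hugetlb_page(vmf->vma))
			return userfaultfd_must_wait(ctx, vmf, reason);
		return userfaultfd_huge_must_wait(ctx, vmf, reason);
	}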
include/linux/mm.h  +20 −0
@@ -679,6 +679,7 @@ static inline void vma_end_read(struct vm_area_struct *vma)
 	rcu_read_unlock();
 }

+/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
 static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
 {
 	mmap_assert_write_locked(vma->vm_mm);
@@ -721,6 +722,12 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma)
 	VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
 }

+static inline void vma_assert_locked(struct vm_area_struct *vma)
+{
+	if (!rwsem_is_locked(&vma->vm_lock->lock))
+		vma_assert_write_locked(vma);
+}
+
 static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
 {
 	/* When detaching vma should be write-locked */
@@ -737,6 +744,14 @@ static inline void release_fault_lock(struct vm_fault *vmf)
 		mmap_read_unlock(vmf->vma->vm_mm);
 }

+static inline void assert_fault_locked(struct vm_fault *vmf)
+{
+	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
+		vma_assert_locked(vmf->vma);
+	else
+		mmap_assert_locked(vmf->vma->vm_mm);
+}
+
 struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
 					  unsigned long address);

@@ -762,6 +777,11 @@ static inline void release_fault_lock(struct vm_fault *vmf)
 	mmap_read_unlock(vmf->vma->vm_mm);
 }

+static inline void assert_fault_locked(struct vm_fault *vmf)
+{
+	mmap_assert_locked(vmf->vma->vm_mm);
+}
+
 #endif /* CONFIG_PER_VMA_LOCK */

 extern const struct vm_operations_struct vma_dummy_vm_ops;
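assert_fault_locked() gives fault-path code a single assertion that matches the locking mode: under FAULT_FLAG_VMA_LOCK it checks the VMA lock (falling back to the write-lock assertion when the rwsem is not held), otherwise it checks mmap_lock. A sketch of a hypothetical handler pairing it with release_fault_lock() on the retry path (example_fault is invented for illustration):

	/* Hypothetical handler; not part of the patch. */
	static vm_fault_t example_fault(struct vm_fault *vmf)
	{
		assert_fault_locked(vmf);	/* VMA lock or mmap_lock, per flags */
		/* ... decide the fault must be retried ... */
		release_fault_lock(vmf);	/* drop whichever lock is held */
		return VM_FAULT_RETRY;
	}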
mm/memory.c  +1 −8
@@ -5471,14 +5471,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
 	 * concurrent mremap() with MREMAP_DONTUNMAP could dissociate the VMA
 	 * from its anon_vma.
 	 */
-	if (vma_is_anonymous(vma) && !vma->anon_vma)
-		goto inval_end_read;
-
-	/*
-	 * Due to the possibility of userfault handler dropping mmap_lock, avoid
-	 * it for now and fall back to page fault handling under mmap_lock.
-	 */
-	if (userfaultfd_armed(vma))
+	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma))
 		goto inval_end_read;

 	/* Check since vm_start/vm_end might change before we lock the VMA */
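With the userfaultfd_armed() bailout removed, lock_vma_under_rcu() may now return userfaultfd-armed VMAs to the per-VMA-lock fast path. A sketch of how an arch fault handler typically consumes this, assuming the common retry pattern (simplified, error handling omitted):

	/* Sketch of a per-VMA-lock fast path; details vary by architecture. */
	vma = lock_vma_under_rcu(mm, address);
	if (vma) {
		fault = handle_mm_fault(vma, address,
					flags | FAULT_FLAG_VMA_LOCK, regs);
		/* On VM_FAULT_RETRY the VMA lock was already dropped. */
		if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
			vma_end_read(vma);
	}
	/* NULL vma or VM_FAULT_RETRY: fall back to the mmap_lock path. */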