Commit 515f12b9 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull HMM fixes from Jason Gunthorpe:
 "Fix the locking around nouveau's use of the hmm_range_* APIs. It works
  correctly in the success case, but many of the edge cases have
  missing unlocks or double unlocks.

  The diffstat is a bit big as Christoph did a comprehensive job to move
  the obsolete API from the core header and into the driver before
  fixing its flow, but the risk of regression from this code motion is
  low"

* tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  nouveau: unlock mmap_sem on all errors from nouveau_range_fault
  nouveau: remove the block parameter to nouveau_range_fault
  mm/hmm: move hmm_vma_range_done and hmm_vma_fault to nouveau
  mm/hmm: always return EBUSY for invalid ranges in hmm_range_{fault,snapshot}
parents 2a11c76e de4ee728
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -237,7 +237,7 @@ The usage pattern is::
      ret = hmm_range_snapshot(&range);
      if (ret) {
          up_read(&mm->mmap_sem);
          if (ret == -EAGAIN) {
          if (ret == -EBUSY) {
            /*
             * No need to check hmm_range_wait_until_valid() return value
             * on retry we will get proper error with hmm_range_snapshot()
+44 −3
Original line number Diff line number Diff line
@@ -475,6 +475,47 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
		fault->inst, fault->addr, fault->access);
}

static inline bool
nouveau_range_done(struct hmm_range *range)
{
	/* Snapshot the range's validity before unregistering it. */
	bool valid = hmm_range_valid(range);

	hmm_range_unregister(range);
	return valid;
}

/*
 * Register an HMM range and fault its pages in.
 *
 * Called with range->vma->vm_mm->mmap_sem held for read.  On success
 * (return 0) the range remains registered and mmap_sem is still held;
 * the caller later calls nouveau_range_done() to unregister.  On every
 * error return this function drops mmap_sem itself.
 */
static int
nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
{
	long ret;

	/*
	 * default_flags is zeroed and the mask set to all ones so the
	 * per-entry flags the driver already placed in range->pfns are
	 * used as-is (see the rationale in the old hmm_vma_fault helper
	 * this replaces).
	 */
	range->default_flags = 0;
	range->pfn_flags_mask = -1UL;

	ret = hmm_range_register(range, mirror,
				 range->start, range->end,
				 PAGE_SHIFT);
	if (ret) {
		/* Registration failed: nothing to unregister, just unlock. */
		up_read(&range->vma->vm_mm->mmap_sem);
		return (int)ret;
	}

	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
		/*
		 * NOTE(review): unlike the fault-failure path below, this
		 * path returns without calling hmm_range_unregister() --
		 * confirm whether the registration is leaked on timeout.
		 */
		up_read(&range->vma->vm_mm->mmap_sem);
		return -EAGAIN;
	}

	ret = hmm_range_fault(range, true);
	if (ret <= 0) {
		/* hmm_range_fault() reporting zero pages is mapped to -EBUSY. */
		if (ret == 0)
			ret = -EBUSY;
		up_read(&range->vma->vm_mm->mmap_sem);
		hmm_range_unregister(range);
		return ret;
	}
	return 0;
}

static int
nouveau_svm_fault(struct nvif_notify *notify)
{
@@ -649,10 +690,10 @@ nouveau_svm_fault(struct nvif_notify *notify)
		range.values = nouveau_svm_pfn_values;
		range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
again:
		ret = hmm_vma_fault(&svmm->mirror, &range, true);
		ret = nouveau_range_fault(&svmm->mirror, &range);
		if (ret == 0) {
			mutex_lock(&svmm->mutex);
			if (!hmm_vma_range_done(&range)) {
			if (!nouveau_range_done(&range)) {
				mutex_unlock(&svmm->mutex);
				goto again;
			}
@@ -666,8 +707,8 @@ nouveau_svm_fault(struct nvif_notify *notify)
						NULL);
			svmm->vmm->vmm.object.client->super = false;
			mutex_unlock(&svmm->mutex);
		}
			up_read(&svmm->mm->mmap_sem);
		}

		/* Cancel any faults in the window whose pages didn't manage
		 * to keep their valid bit, or stay writeable when required.
+0 −54
Original line number Diff line number Diff line
@@ -484,60 +484,6 @@ long hmm_range_dma_unmap(struct hmm_range *range,
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000

/* This is a temporary helper to avoid merge conflict between trees. */
static inline bool hmm_vma_range_done(struct hmm_range *range)
{
	bool was_valid;

	/* Capture validity first; then tear down the registration. */
	was_valid = hmm_range_valid(range);
	hmm_range_unregister(range);
	return was_valid;
}

/* This is a temporary helper to avoid merge conflict between trees. */
/*
 * Emulate the obsolete one-shot fault API on top of the new
 * register / wait-until-valid / fault flow.
 *
 * Caller holds mmap_sem for read.  On -EAGAIN/-EBUSY/fault-error
 * returns, mmap_sem is dropped here to match the old API's contract;
 * on success (0) the caller keeps mmap_sem and must later call
 * hmm_vma_range_done().
 *
 * NOTE(review): the hmm_range_register() failure path below returns
 * WITHOUT dropping mmap_sem, unlike every other error path in this
 * function -- this asymmetry appears to be one of the missing unlocks
 * this series removes the helper to fix; confirm against callers.
 */
static inline int hmm_vma_fault(struct hmm_mirror *mirror,
				struct hmm_range *range, bool block)
{
	long ret;

	/*
	 * With the old API the driver must set each individual entries with
	 * the requested flags (valid, write, ...). So here we set the mask to
	 * keep intact the entries provided by the driver and zero out the
	 * default_flags.
	 */
	range->default_flags = 0;
	range->pfn_flags_mask = -1UL;

	ret = hmm_range_register(range, mirror,
				 range->start, range->end,
				 PAGE_SHIFT);
	if (ret)
		return (int)ret;

	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
		/*
		 * The mmap_sem was taken by driver we release it here and
		 * returns -EAGAIN which correspond to mmap_sem have been
		 * drop in the old API.
		 */
		up_read(&range->vma->vm_mm->mmap_sem);
		return -EAGAIN;
	}

	ret = hmm_range_fault(range, block);
	if (ret <= 0) {
		/* Normalize retryable results (-EBUSY, 0, -EAGAIN) to -EBUSY. */
		if (ret == -EBUSY || !ret) {
			/* Same as above, drop mmap_sem to match old API. */
			up_read(&range->vma->vm_mm->mmap_sem);
			ret = -EBUSY;
		} else if (ret == -EAGAIN)
			ret = -EBUSY;
		hmm_range_unregister(range);
		return ret;
	}
	return 0;
}

/* Below are for HMM internal use only! Not to be used by device driver! */
static inline void hmm_mm_init(struct mm_struct *mm)
{
+4 −6
Original line number Diff line number Diff line
@@ -946,7 +946,7 @@ EXPORT_SYMBOL(hmm_range_unregister);
 * @range: range
 * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          permission (for instance asking for write and range is read only),
 *          -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
 *          -EBUSY if you need to retry, -EFAULT invalid (ie either no valid
 *          vma or it is illegal to access that range), number of valid pages
 *          in range->pfns[] (from range start address).
 *
@@ -967,7 +967,7 @@ long hmm_range_snapshot(struct hmm_range *range)
	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EAGAIN;
			return -EBUSY;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
@@ -1062,10 +1062,8 @@ long hmm_range_fault(struct hmm_range *range, bool block)

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid) {
			up_read(&hmm->mm->mmap_sem);
			return -EAGAIN;
		}
		if (!range->valid)
			return -EBUSY;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))