Commit 02712bc3 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Jason Gunthorpe
Browse files

mm/hmm: move hmm_vma_range_done and hmm_vma_fault to nouveau

These two functions are marked as legacy APIs to get rid of, but they seem to
suit the current nouveau flow.  Move them to their only user in preparation
for fixing a locking bug involving caller and callee.  All comments
referring to the old API have been removed, as this is now a driver-private
helper.

Link: https://lore.kernel.org/r/20190724065258.16603-3-hch@lst.de


Tested-by: default avatarRalph Campbell <rcampbell@nvidia.com>
Signed-off-by: default avatarChristoph Hellwig <hch@lst.de>
Reviewed-by: default avatarJason Gunthorpe <jgg@mellanox.com>
Signed-off-by: default avatarJason Gunthorpe <jgg@mellanox.com>
parent 2bcbeaef
Loading
Loading
Loading
Loading
+44 −2
Original line number Diff line number Diff line
@@ -475,6 +475,48 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
		fault->inst, fault->addr, fault->access);
}

/*
 * Tear down the HMM range registration and report whether the range was
 * still valid (i.e. no invalidation raced with the fault) at that point.
 */
static inline bool
nouveau_range_done(struct hmm_range *range)
{
	bool valid;

	valid = hmm_range_valid(range);
	hmm_range_unregister(range);

	return valid;
}

/*
 * Register @range with @mirror and fault in its pages.
 *
 * Locking contract (asymmetric — callers must be aware):
 *  - mmap_sem must be held for read on entry.
 *  - On success (return 0) mmap_sem is STILL HELD and the range is still
 *    registered; the caller must later call nouveau_range_done().
 *  - On -EAGAIN and -EBUSY returns this function DROPS mmap_sem itself.
 *
 * Returns 0 on success, or a negative errno on failure.
 */
static int
nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range,
		    bool block)
{
	long ret;

	/* Keep the per-pfn flags supplied by the caller intact: zero the
	 * default flags and let the mask pass everything through. */
	range->default_flags = 0;
	range->pfn_flags_mask = -1UL;

	ret = hmm_range_register(range, mirror,
				 range->start, range->end,
				 PAGE_SHIFT);
	if (ret)
		return (int)ret;

	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
		/* Timed out waiting for the range to become valid: drop
		 * mmap_sem on behalf of the caller and ask it to retry.
		 * NOTE(review): the range stays registered on this path —
		 * verify the caller's retry re-registers safely or this
		 * leaks the registration. */
		up_read(&range->vma->vm_mm->mmap_sem);
		return -EAGAIN;
	}

	ret = hmm_range_fault(range, block);
	if (ret <= 0) {
		/* hmm_range_fault() returns the number of faulted pages on
		 * success; <= 0 means nothing was faulted or an error. */
		if (ret == -EBUSY || !ret) {
			/* Fold "no pages" into -EBUSY and drop mmap_sem. */
			up_read(&range->vma->vm_mm->mmap_sem);
			ret = -EBUSY;
		} else if (ret == -EAGAIN)
			/* mmap_sem was already dropped by hmm_range_fault()
			 * in this case — presumably; confirm against the
			 * hmm_range_fault() contract. */
			ret = -EBUSY;
		hmm_range_unregister(range);
		return ret;
	}
	return 0;
}

static int
nouveau_svm_fault(struct nvif_notify *notify)
{
@@ -649,10 +691,10 @@ nouveau_svm_fault(struct nvif_notify *notify)
		range.values = nouveau_svm_pfn_values;
		range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
again:
		ret = hmm_vma_fault(&svmm->mirror, &range, true);
		ret = nouveau_range_fault(&svmm->mirror, &range, true);
		if (ret == 0) {
			mutex_lock(&svmm->mutex);
			if (!hmm_vma_range_done(&range)) {
			if (!nouveau_range_done(&range)) {
				mutex_unlock(&svmm->mutex);
				goto again;
			}
+0 −54
Original line number Diff line number Diff line
@@ -484,60 +484,6 @@ long hmm_range_dma_unmap(struct hmm_range *range,
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000

/* This is a temporary helper to avoid merge conflict between trees. */
static inline bool hmm_vma_range_done(struct hmm_range *range)
{
	bool valid;

	valid = hmm_range_valid(range);
	hmm_range_unregister(range);

	return valid;
}

/* This is a temporary helper to avoid merge conflict between trees.
 *
 * Register @range with @mirror and fault its pages in, emulating the old
 * hmm_vma_fault() API semantics.
 *
 * Locking: mmap_sem must be held for read on entry.  On -EAGAIN and -EBUSY
 * returns this helper drops mmap_sem itself (matching the old API); on
 * success (return 0) mmap_sem is still held and the range remains
 * registered.
 */
static inline int hmm_vma_fault(struct hmm_mirror *mirror,
				struct hmm_range *range, bool block)
{
	long ret;

	/*
	 * With the old API the driver must set each individual entry with
	 * the requested flags (valid, write, ...). So here we set the mask
	 * to keep intact the entries provided by the driver and zero out
	 * the default_flags.
	 */
	range->default_flags = 0;
	range->pfn_flags_mask = -1UL;

	ret = hmm_range_register(range, mirror,
				 range->start, range->end,
				 PAGE_SHIFT);
	if (ret)
		return (int)ret;

	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
		/*
		 * The mmap_sem was taken by the driver; we release it here
		 * and return -EAGAIN, which in the old API signalled that
		 * mmap_sem has been dropped.
		 * NOTE(review): the range stays registered on this path —
		 * verify callers handle that on retry.
		 */
		up_read(&range->vma->vm_mm->mmap_sem);
		return -EAGAIN;
	}

	ret = hmm_range_fault(range, block);
	if (ret <= 0) {
		/* <= 0: no pages faulted, or an error from hmm_range_fault(). */
		if (ret == -EBUSY || !ret) {
			/* Same as above, drop mmap_sem to match old API. */
			up_read(&range->vma->vm_mm->mmap_sem);
			ret = -EBUSY;
		} else if (ret == -EAGAIN)
			ret = -EBUSY;
		hmm_range_unregister(range);
		return ret;
	}
	return 0;
}

/* Below are for HMM internal use only! Not to be used by device driver! */
static inline void hmm_mm_init(struct mm_struct *mm)
{