Commit 76fe313a authored by Christian König
Browse files

drm/nouveau: stop using TTMs fault callback



We already implemented the fault handler ourselves;
just open-code what is necessary here.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/392323/
parent d3ef581a
Loading
Loading
Loading
Loading
+26 −24
Original line number Diff line number Diff line
@@ -1226,8 +1226,7 @@ nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1243,17 +1242,12 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
		    !nvbo->kind)
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
						 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		if (bo->mem.mem_type != TTM_PL_SYSTEM)
			return 0;
	}

		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);

	} else {
		/* make sure bo is in mappable vram */
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
		    bo->mem.start + bo->mem.num_pages < mappable)
@@ -1270,7 +1264,16 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
		}

		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
	}

	ret = nouveau_bo_validate(nvbo, false, false);
	if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(ret))
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}

static int
@@ -1381,7 +1384,6 @@ struct ttm_bo_driver nouveau_bo_driver = {
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};
+1 −0
Original line number Diff line number Diff line
@@ -89,6 +89,7 @@ void nouveau_bo_placement_set(struct nouveau_bo *, u32 type, u32 busy);
void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
u32  nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo);
void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);
int  nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
			 bool no_wait_gpu);
+6 −4
Original line number Diff line number Diff line
@@ -134,17 +134,19 @@ static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
	if (ret)
		return ret;

	nouveau_bo_del_io_reserve_lru(bo);
	ret = nouveau_ttm_fault_reserve_notify(bo);
	if (ret)
		goto error_unlock;

	nouveau_bo_del_io_reserve_lru(bo);
	prot = vm_get_page_prot(vma->vm_flags);
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
	nouveau_bo_add_io_reserve_lru(bo);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

	nouveau_bo_add_io_reserve_lru(bo);

error_unlock:
	dma_resv_unlock(bo->base.resv);

	return ret;
}