Commit 9e072793 authored by Greg Kroah-Hartman

Merge tag 'misc-habanalabs-next-2020-09-22' of git://people.freedesktop.org/~gabbayo/linux into char-misc-next

Oded writes:

This tag contains the following changes for kernel 5.10-rc1:

- Stop using the DRM's dma-fence module and instead use kernel completions
  (see the hl_fence sketch after this list).
- Support PCIe AER.
- Use dma_mmap_coherent for memory allocated using dma_alloc_coherent
  (see the mmap sketch after this list).
- Use the smallest possible alignment when allocating virtual addresses in our
  MMU driver.
- Refactor the MMU driver code to be device-oriented.
- Allow the user to check CS status without any sleep.
- Add an option to map a Command Buffer to the device's MMU.
- Expose sync manager resource allocation to the user through the INFO IOCTL.
- Convert code to use the standard BIT(), GENMASK() and FIELD_PREP() macros
  (see the bit-field sketch after this list).
- Many small fixes (casting, better error messages, removal of unused
  defines, h/w configuration fixes, etc.).
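
For the first item, the core of the change is visible in the command_submission.c
diff below: the fence becomes a plain kref plus a kernel completion
(hl_fence_init, hl_fence_get/put, complete_all, completion_done). A minimal,
self-contained sketch of that pattern follows; the struct is simplified, and
hl_fence_signal()/hl_fence_wait() are illustrative helpers rather than
functions from the driver:

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Simplified stand-in for the driver's struct hl_fence. */
struct hl_fence {
	struct kref refcount;
	struct completion completion;
	int error;
};

static void hl_fence_release(struct kref *kref)
{
	struct hl_fence *fence = container_of(kref, struct hl_fence, refcount);

	kfree(fence);
}

static void hl_fence_init(struct hl_fence *fence)
{
	kref_init(&fence->refcount);
	fence->error = 0;
	init_completion(&fence->completion);
}

/* Producer side: record an error (if any), wake every waiter, drop the ref. */
static void hl_fence_signal(struct hl_fence *fence, int error)
{
	fence->error = error;
	complete_all(&fence->completion);
	kref_put(&fence->refcount, hl_fence_release);
}

/* Consumer side: a zero timeout only polls, otherwise sleep interruptibly. */
static long hl_fence_wait(struct hl_fence *fence, unsigned long timeout)
{
	if (!timeout)
		return completion_done(&fence->completion);

	return wait_for_completion_interruptible_timeout(&fence->completion,
							 timeout);
}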
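
The dma_mmap_coherent item does not appear in the diffs below, but the idea is
straightforward: instead of translating the coherent buffer's kernel address
with virt_to_phys() and remapping pages by hand, pass the vma together with the
original cpu_addr/dma_addr pair back to the DMA API, which chooses the correct
pgprot for the platform. A hedged sketch, where struct buf_dev and buf_mmap()
are hypothetical stand-ins for the driver's own objects:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct buf_dev {
	struct device *dev;
	void *cpu_addr;		/* returned by dma_alloc_coherent() */
	dma_addr_t dma_addr;	/* returned by dma_alloc_coherent() */
	size_t size;
};

static int buf_mmap(struct buf_dev *bdev, struct vm_area_struct *vma)
{
	/* vm_pgoff often carries a buffer handle; clear it before mapping,
	 * as hl_cb_mmap() does in the command_buffer.c diff below.
	 */
	vma->vm_pgoff = 0;

	return dma_mmap_coherent(bdev->dev, vma, bdev->cpu_addr,
				 bdev->dma_addr, bdev->size);
}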
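
The BIT()/GENMASK()/FIELD_PREP() conversion replaces hand-written shift-and-mask
constants with the generic helpers from <linux/bits.h> and <linux/bitfield.h>.
A small sketch; this register layout is made up purely for illustration:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define REG_ENABLE	BIT(0)		/* single enable bit */
#define REG_QUEUE_ID	GENMASK(7, 4)	/* 4-bit queue-id field */

static u32 build_reg(u32 qid)
{
	/* FIELD_PREP() shifts qid into the GENMASK-defined field, so no
	 * separate shift constant is needed.
	 */
	return REG_ENABLE | FIELD_PREP(REG_QUEUE_ID, qid);
}

static u32 read_qid(u32 reg)
{
	/* FIELD_GET() extracts the field back out of a register value. */
	return FIELD_GET(REG_QUEUE_ID, reg);
}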

* tag 'misc-habanalabs-next-2020-09-22' of git://people.freedesktop.org/~gabbayo/linux: (46 commits)
  habanalabs: update scratchpad register map
  habanalabs: add indication of security-enabled F/W
  habanalabs/gaudi: fix DMA completions max outstanding to 15
  habanalabs/gaudi: remove axi drain support
  habanalabs: update firmware interface file
  habanalabs: Add an option to map CB to device MMU
  habanalabs: Save context in a command buffer object
  habanalabs: no need for DMA_SHARED_BUFFER
  habanalabs: allow to wait on CS without sleep
  habanalabs/gaudi: increase timeout for boot fit load
  habanalabs: add debugfs support for MMU with 6 HOPs
  habanalabs: add num_hops to hl_mmu_properties
  habanalabs: refactor MMU as device-oriented
  habanalabs: rename mmu.c to mmu_v1.c
  habanalabs: use smallest possible alignment for virtual addresses
  habanalabs: check flag before reset because of f/w event
  habanalabs: increase PQ COMP_OFFSET by one nibble
  habanalabs: Fix alignment issue in cpucp_info structure
  habanalabs: remove unused define
  habanalabs: remove unused ASIC function pointer
  ...
parents e82ed736 f279e5cd
Documentation/ABI/testing/sysfs-driver-habanalabs  +17 −1
@@ -2,13 +2,17 @@ What: /sys/class/habanalabs/hl<n>/armcp_kernel_ver
Date:           Jan 2019
KernelVersion:  5.1
Contact:        oded.gabbay@gmail.com
-Description:    Version of the Linux kernel running on the device's CPU
+Description:    Version of the Linux kernel running on the device's CPU.
+                Will be DEPRECATED in Linux kernel version 5.10, and be
+                replaced with cpucp_kernel_ver

What:           /sys/class/habanalabs/hl<n>/armcp_ver
Date:           Jan 2019
KernelVersion:  5.1
Contact:        oded.gabbay@gmail.com
Description:    Version of the application running on the device's CPU
+                Will be DEPRECATED in Linux kernel version 5.10, and be
+                replaced with cpucp_ver

What:           /sys/class/habanalabs/hl<n>/clk_max_freq_mhz
Date:           Jun 2019
@@ -33,6 +37,18 @@ KernelVersion: 5.1
Contact:        oded.gabbay@gmail.com
Description:    Version of the Device's CPLD F/W

+What:           /sys/class/habanalabs/hl<n>/cpucp_kernel_ver
+Date:           Oct 2020
+KernelVersion:  5.10
+Contact:        oded.gabbay@gmail.com
+Description:    Version of the Linux kernel running on the device's CPU
+
+What:           /sys/class/habanalabs/hl<n>/cpucp_ver
+Date:           Oct 2020
+KernelVersion:  5.10
+Contact:        oded.gabbay@gmail.com
+Description:    Version of the application running on the device's CPU
+
What:           /sys/class/habanalabs/hl<n>/device_type
Date:           Jan 2019
KernelVersion:  5.1
drivers/misc/habanalabs/Kconfig  +0 −1
@@ -7,7 +7,6 @@ config HABANA_AI
	tristate "HabanaAI accelerators (habanalabs)"
	depends on PCI && HAS_IOMEM
	select FRAME_VECTOR
-	select DMA_SHARED_BUFFER
	select GENERIC_ALLOCATOR
	select HWMON
	help
drivers/misc/habanalabs/common/Makefile  +2 −2
@@ -3,5 +3,5 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
		common/asid.o common/habanalabs_ioctl.o \
		common/command_buffer.o common/hw_queue.o common/irq.o \
		common/sysfs.o common/hwmon.o common/memory.o \
-		common/command_submission.o common/mmu.o common/firmware_if.o \
-		common/pci.o
+		common/command_submission.o common/mmu.o common/mmu_v1.o \
+		common/firmware_if.o common/pci.o
drivers/misc/habanalabs/common/command_buffer.c  +210 −19
@@ -13,6 +13,131 @@
#include <linux/uaccess.h>
#include <linux/genalloc.h>

static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm_va_block *va_block, *tmp;
	dma_addr_t bus_addr;
	u64 virt_addr;
	u32 page_size = prop->pmmu.page_size;
	s32 offset;
	int rc;

	if (!hdev->supports_cb_mapping) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because no VA range is allocated for CB mapping\n");
		return -EINVAL;
	}

	if (!hdev->mmu_enable) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because MMU is disabled\n");
		return -EINVAL;
	}

	INIT_LIST_HEAD(&cb->va_block_list);

	for (bus_addr = cb->bus_address;
			bus_addr < cb->bus_address + cb->size;
			bus_addr += page_size) {

		virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
		if (!virt_addr) {
			dev_err(hdev->dev,
				"Failed to allocate device virtual address for CB\n");
			rc = -ENOMEM;
			goto err_va_pool_free;
		}

		va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
		if (!va_block) {
			rc = -ENOMEM;
			gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
			goto err_va_pool_free;
		}

		va_block->start = virt_addr;
		va_block->end = virt_addr + page_size;
		va_block->size = page_size;
		list_add_tail(&va_block->node, &cb->va_block_list);
	}

	mutex_lock(&ctx->mmu_lock);

	bus_addr = cb->bus_address;
	offset = 0;
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		rc = hl_mmu_map(ctx, va_block->start, bus_addr, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list));
		if (rc) {
			dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
				va_block->start);
			goto err_va_umap;
		}

		bus_addr += va_block->size;
		offset += va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	cb->is_mmu_mapped = true;

	return 0;

err_va_umap:
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		if (offset <= 0)
			break;
		hl_mmu_unmap(ctx, va_block->start, va_block->size,
				offset <= va_block->size);
		offset -= va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

err_va_pool_free:
	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}

	return rc;
}

static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_va_block *va_block, *tmp;

	mutex_lock(&ctx->mmu_lock);

	list_for_each_entry(va_block, &cb->va_block_list, node)
		if (hl_mmu_unmap(ctx, va_block->start, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list)))
			dev_warn_ratelimited(hdev->dev,
					"Failed to unmap CB's va 0x%llx\n",
					va_block->start);

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}
}

static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
@@ -47,6 +172,11 @@ static void cb_release(struct kref *ref)

	hl_debugfs_remove_cb(cb);

+	if (cb->is_mmu_mapped)
+		cb_unmap_mem(cb->ctx, cb);
+
+	hl_ctx_put(cb->ctx);
+
	cb_do_release(hdev, cb);
}

@@ -107,11 +237,12 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
}

int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
-			u32 cb_size, u64 *handle, int ctx_id, bool internal_cb)
+			struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
+			bool map_cb, u64 *handle)
{
	struct hl_cb *cb;
	bool alloc_new_cb = true;
-	int rc;
+	int rc, ctx_id = ctx->asid;

	/*
	 * Can't use generic function to check this because of special case
@@ -163,7 +294,21 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
	}

	cb->hdev = hdev;
-	cb->ctx_id = ctx_id;
+	cb->ctx = ctx;
+	hl_ctx_get(hdev, cb->ctx);
+
+	if (map_cb) {
+		if (ctx_id == HL_KERNEL_ASID_ID) {
+			dev_err(hdev->dev,
+				"CB mapping is not supported for kernel context\n");
+			rc = -EINVAL;
+			goto release_cb;
+		}
+
+		rc = cb_map_mem(ctx, cb);
+		if (rc)
+			goto release_cb;
+	}

	spin_lock(&mgr->cb_lock);
	rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
@@ -171,10 +316,10 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
-		goto release_cb;
+		goto unmap_mem;
	}

-	cb->id = rc;
+	cb->id = (u64) rc;

	kref_init(&cb->refcount);
	spin_lock_init(&cb->lock);
@@ -183,14 +328,18 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
	 * idr is 32-bit so we can safely OR it with a mask that is above
	 * 32 bit
	 */
-	*handle = cb->id | HL_MMAP_CB_MASK;
+	*handle = cb->id | HL_MMAP_TYPE_CB;
	*handle <<= PAGE_SHIFT;

	hl_debugfs_add_cb(cb);

	return 0;

+unmap_mem:
+	if (cb->is_mmu_mapped)
+		cb_unmap_mem(cb->ctx, cb);
release_cb:
+	hl_ctx_put(cb->ctx);
	cb_do_release(hdev, cb);
out_err:
	*handle = 0;
@@ -250,9 +399,10 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
				args->in.cb_size, HL_MAX_CB_SIZE);
			rc = -EINVAL;
		} else {
-			rc = hl_cb_create(hdev, &hpriv->cb_mgr,
-					args->in.cb_size, &handle,
-					hpriv->ctx->asid, false);
+			rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
+					args->in.cb_size, false,
+					!!(args->in.flags & HL_CB_FLAGS_MAP),
+					&handle);
		}

		memset(args, 0, sizeof(*args));
@@ -300,11 +450,14 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cb *cb;
-	phys_addr_t address;
	u32 handle, user_cb_size;
	int rc;

+	/* We use the page offset to hold the idr and thus we need to clear
+	 * it before doing the mmap itself
+	 */
	handle = vma->vm_pgoff;
+	vma->vm_pgoff = 0;

	/* reference was taken here */
	cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
@@ -356,12 +509,8 @@ int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)

	vma->vm_private_data = cb;

-	/* Calculate address for CB */
-	address = virt_to_phys((void *) (uintptr_t) cb->kernel_address);
-
-	rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
-					address, cb->size);
-
+	rc = hdev->asic_funcs->cb_mmap(hdev, vma, (void *) cb->kernel_address,
+					cb->bus_address, cb->size);
	if (rc) {
		spin_lock(&cb->lock);
		cb->mmap = false;
@@ -425,7 +574,7 @@ void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
		if (kref_put(&cb->refcount, cb_release) != 1)
			dev_err(hdev->dev,
				"CB %d for CTX ID %d is still alive\n",
-				id, cb->ctx_id);
+				id, cb->ctx->asid);
	}

	idr_destroy(&mgr->cb_handles);
@@ -438,8 +587,8 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
	struct hl_cb *cb;
	int rc;

-	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle,
-			HL_KERNEL_ASID_ID, internal_cb);
+	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
+				internal_cb, false, &cb_handle);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate CB for the kernel driver %d\n", rc);
@@ -495,3 +644,45 @@ int hl_cb_pool_fini(struct hl_device *hdev)

	return 0;
}

int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!hdev->supports_cb_mapping)
		return 0;

	ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
	if (!ctx->cb_va_pool) {
		dev_err(hdev->dev,
			"Failed to create VA gen pool for CB mapping\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
			prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to VA gen pool for CB mapping\n");
		goto err_pool_destroy;
	}

	return 0;

err_pool_destroy:
	gen_pool_destroy(ctx->cb_va_pool);

	return rc;
}

void hl_cb_va_pool_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->supports_cb_mapping)
		return;

	gen_pool_destroy(ctx->cb_va_pool);
}
drivers/misc/habanalabs/common/command_submission.c  +56 −51
@@ -38,26 +38,10 @@ void hl_sob_reset_error(struct kref *ref)
			hw_sob->q_idx, hw_sob->sob_id);
}

-static const char *hl_fence_get_driver_name(struct dma_fence *fence)
-{
-	return "HabanaLabs";
-}
-
-static const char *hl_fence_get_timeline_name(struct dma_fence *fence)
-{
-	struct hl_cs_compl *hl_cs_compl =
-		container_of(fence, struct hl_cs_compl, base_fence);
-
-	return dev_name(hl_cs_compl->hdev->dev);
-}
-
-static bool hl_fence_enable_signaling(struct dma_fence *fence)
-{
-	return true;
-}
-
-static void hl_fence_release(struct dma_fence *fence)
+static void hl_fence_release(struct kref *kref)
{
+	struct hl_fence *fence =
+		container_of(kref, struct hl_fence, refcount);
	struct hl_cs_compl *hl_cs_cmpl =
		container_of(fence, struct hl_cs_compl, base_fence);
	struct hl_device *hdev = hl_cs_cmpl->hdev;
@@ -99,15 +83,27 @@ static void hl_fence_release(struct dma_fence *fence)
	}

free:
-	kfree_rcu(hl_cs_cmpl, base_fence.rcu);
+	kfree(hl_cs_cmpl);
}

-static const struct dma_fence_ops hl_fence_ops = {
-	.get_driver_name = hl_fence_get_driver_name,
-	.get_timeline_name = hl_fence_get_timeline_name,
-	.enable_signaling = hl_fence_enable_signaling,
-	.release = hl_fence_release
-};
+void hl_fence_put(struct hl_fence *fence)
+{
+	if (fence)
+		kref_put(&fence->refcount, hl_fence_release);
+}
+
+void hl_fence_get(struct hl_fence *fence)
+{
+	if (fence)
+		kref_get(&fence->refcount);
+}
+
+static void hl_fence_init(struct hl_fence *fence)
+{
+	kref_init(&fence->refcount);
+	fence->error = 0;
+	init_completion(&fence->completion);
+}

static void cs_get(struct hl_cs *cs)
{
@@ -256,6 +252,8 @@ static void cs_counters_aggregate(struct hl_device *hdev, struct hl_ctx *ctx)
			ctx->cs_counters.parsing_drop_cnt;
	hdev->aggregated_cs_counters.queue_full_drop_cnt +=
			ctx->cs_counters.queue_full_drop_cnt;
+	hdev->aggregated_cs_counters.max_cs_in_flight_drop_cnt +=
+			ctx->cs_counters.max_cs_in_flight_drop_cnt;
}

static void cs_do_release(struct kref *ref)
@@ -336,7 +334,7 @@ static void cs_do_release(struct kref *ref)
		 * In case the wait for signal CS was submitted, the put occurs
		 * in init_signal_wait_cs() right before hanging on the PQ.
		 */
-		dma_fence_put(cs->signal_fence);
+		hl_fence_put(cs->signal_fence);
	}

	/*
@@ -348,19 +346,18 @@ static void cs_do_release(struct kref *ref)
	hl_ctx_put(cs->ctx);

	/* We need to mark an error for not submitted because in that case
-	 * the dma fence release flow is different. Mainly, we don't need
+	 * the hl fence release flow is different. Mainly, we don't need
	 * to handle hw_sob for signal/wait
	 */
	if (cs->timedout)
-		dma_fence_set_error(cs->fence, -ETIMEDOUT);
+		cs->fence->error = -ETIMEDOUT;
	else if (cs->aborted)
-		dma_fence_set_error(cs->fence, -EIO);
+		cs->fence->error = -EIO;
	else if (!cs->submitted)
-		dma_fence_set_error(cs->fence, -EBUSY);
-
-	dma_fence_signal(cs->fence);
-	dma_fence_put(cs->fence);
+		cs->fence->error = -EBUSY;
+
+	complete_all(&cs->fence->completion);
+	hl_fence_put(cs->fence);
	cs_counters_aggregate(hdev, cs->ctx);

	kfree(cs->jobs_in_queue_cnt);
@@ -401,7 +398,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
			enum hl_cs_type cs_type, struct hl_cs **cs_new)
{
	struct hl_cs_compl *cs_cmpl;
-	struct dma_fence *other = NULL;
+	struct hl_fence *other = NULL;
	struct hl_cs *cs;
	int rc;

@@ -434,9 +431,11 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
	cs_cmpl->cs_seq = ctx->cs_sequence;
	other = ctx->cs_pending[cs_cmpl->cs_seq &
				(hdev->asic_prop.max_pending_cs - 1)];
-	if ((other) && (!dma_fence_is_signaled(other))) {
-		dev_dbg(hdev->dev,
+
+	if (other && !completion_done(&other->completion)) {
+		dev_dbg_ratelimited(hdev->dev,
			"Rejecting CS because of too many in-flights CS\n");
+		ctx->cs_counters.max_cs_in_flight_drop_cnt++;
		rc = -EAGAIN;
		goto free_fence;
	}
@@ -448,8 +447,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
		goto free_fence;
	}

-	dma_fence_init(&cs_cmpl->base_fence, &hl_fence_ops, &cs_cmpl->lock,
-			ctx->asid, ctx->cs_sequence);
+	/* init hl_fence */
+	hl_fence_init(&cs_cmpl->base_fence);

	cs->sequence = cs_cmpl->cs_seq;

@@ -458,9 +457,9 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
							&cs_cmpl->base_fence;
	ctx->cs_sequence++;

-	dma_fence_get(&cs_cmpl->base_fence);
+	hl_fence_get(&cs_cmpl->base_fence);

-	dma_fence_put(other);
+	hl_fence_put(other);

	spin_unlock(&ctx->cs_lock);

@@ -690,7 +689,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
			rc = -ENOMEM;
			if (is_kernel_allocated_cb)
				goto release_cb;
-			else
+
			goto free_cs_object;
		}

@@ -773,7 +772,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
	struct hl_ctx *ctx = hpriv->ctx;
	struct hl_cs_chunk *cs_chunk_array, *chunk;
	struct hw_queue_properties *hw_queue_prop;
-	struct dma_fence *sig_fence = NULL;
+	struct hl_fence *sig_fence = NULL;
	struct hl_cs_job *job;
	struct hl_cs *cs;
	struct hl_cb *cb;
@@ -883,14 +882,14 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
			dev_err(hdev->dev,
				"CS seq 0x%llx is not of a signal CS\n",
				signal_seq);
-			dma_fence_put(sig_fence);
+			hl_fence_put(sig_fence);
			rc = -EINVAL;
			goto free_signal_seq_array;
		}

-		if (dma_fence_is_signaled(sig_fence)) {
+		if (completion_done(&sig_fence->completion)) {
			/* signal CS already finished */
-			dma_fence_put(sig_fence);
+			hl_fence_put(sig_fence);
			rc = 0;
			goto free_signal_seq_array;
		}
@@ -902,7 +901,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
	rc = allocate_cs(hdev, ctx, cs_type, &cs);
	if (rc) {
		if (cs_type == CS_TYPE_WAIT)
-			dma_fence_put(sig_fence);
+			hl_fence_put(sig_fence);
		hl_ctx_put(ctx);
		goto free_signal_seq_array;
	}
@@ -1162,7 +1161,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
static long _hl_cs_wait_ioctl(struct hl_device *hdev,
		struct hl_ctx *ctx, u64 timeout_us, u64 seq)
{
-	struct dma_fence *fence;
+	struct hl_fence *fence;
	unsigned long timeout;
	long rc;

@@ -1181,12 +1180,18 @@ static long _hl_cs_wait_ioctl(struct hl_device *hdev,
				"Can't wait on CS %llu because current CS is at seq %llu\n",
				seq, ctx->cs_sequence);
	} else if (fence) {
-		rc = dma_fence_wait_timeout(fence, true, timeout);
+		if (!timeout_us)
+			rc = completion_done(&fence->completion);
+		else
+			rc = wait_for_completion_interruptible_timeout(
+					&fence->completion, timeout);
+
		if (fence->error == -ETIMEDOUT)
			rc = -ETIMEDOUT;
		else if (fence->error == -EIO)
			rc = -EIO;
-		dma_fence_put(fence);
+
+		hl_fence_put(fence);
	} else {
		dev_dbg(hdev->dev,
			"Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",