Commit ef6a0f6c authored by Tomer Tayar, committed by Oded Gabbay

habanalabs: Add an option to map CB to device MMU

There are cases in which the device should access the host memory of a
CB through the device MMU, and thus this memory should be mapped.
This patch adds a flag to the CB IOCTL with which the user can ask the
driver to perform the mapping when creating a CB.
The mapping is allowed only if a dedicated VA range was allocated for
the specific ASIC.
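
A minimal user-space sketch of the new flow (error handling trimmed; it
assumes the uapi side of this commit, which is not shown below and which
adds a flags field and an HL_CB_FLAGS_MAP bit to struct hl_cb_in):

#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

/* Create a CB of 'size' bytes on an open habanalabs device 'fd' and ask
 * the driver to also map it to the device MMU via HL_CB_FLAGS_MAP.
 * Returns 0 and fills 'handle' on success, -1 on failure.
 */
static int create_mapped_cb(int fd, __u32 size, __u64 *handle)
{
	union hl_cb_args args;

	memset(&args, 0, sizeof(args));
	args.in.op = HL_CB_OP_CREATE;
	args.in.cb_size = size;
	args.in.flags = HL_CB_FLAGS_MAP;

	if (ioctl(fd, HL_IOCTL_CB, &args))
		return -1;

	*handle = args.out.cb_handle;
	return 0;
}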

Signed-off-by: Tomer Tayar <ttayar@habana.ai>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
parent fa8641a1
drivers/misc/habanalabs/common/command_buffer.c +192 −4
@@ -13,6 +13,131 @@
#include <linux/uaccess.h>
#include <linux/genalloc.h>

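/*
 * cb_map_mem() - map a CB's host memory to the device MMU.
 *
 * Allocate a device VA block from the context's CB VA pool for every page
 * of the CB, map each block to its bus address, and invalidate the MMU
 * cache once all pages are mapped. Fails if the ASIC has no dedicated VA
 * range for CB mapping or if the MMU is disabled.
 */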
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm_va_block *va_block, *tmp;
	dma_addr_t bus_addr;
	u64 virt_addr;
	u32 page_size = prop->pmmu.page_size;
	s32 offset;
	int rc;

	if (!hdev->supports_cb_mapping) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because no VA range is allocated for CB mapping\n");
		return -EINVAL;
	}

	if (!hdev->mmu_enable) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because MMU is disabled\n");
		return -EINVAL;
	}

	INIT_LIST_HEAD(&cb->va_block_list);

	for (bus_addr = cb->bus_address;
			bus_addr < cb->bus_address + cb->size;
			bus_addr += page_size) {

		virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
		if (!virt_addr) {
			dev_err(hdev->dev,
				"Failed to allocate device virtual address for CB\n");
			rc = -ENOMEM;
			goto err_va_pool_free;
		}

		va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
		if (!va_block) {
			rc = -ENOMEM;
			gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
			goto err_va_pool_free;
		}

		va_block->start = virt_addr;
		va_block->end = virt_addr + page_size;
		va_block->size = page_size;
		list_add_tail(&va_block->node, &cb->va_block_list);
	}

	mutex_lock(&ctx->mmu_lock);

	bus_addr = cb->bus_address;
	offset = 0;
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		rc = hl_mmu_map(ctx, va_block->start, bus_addr, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list));
		if (rc) {
			dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
				va_block->start);
			goto err_va_umap;
		}

		bus_addr += va_block->size;
		offset += va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	cb->is_mmu_mapped = true;

	return 0;

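	/*
	 * Unmap only the VA blocks that were successfully mapped, i.e. the
	 * first 'offset' bytes of the CB, flushing the PTEs on the last one.
	 */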
err_va_umap:
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		if (offset <= 0)
			break;
		hl_mmu_unmap(ctx, va_block->start, va_block->size,
				offset <= va_block->size);
		offset -= va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

err_va_pool_free:
	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}

	return rc;
}

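/*
 * cb_unmap_mem() - tear down a CB's device MMU mappings.
 *
 * Unmap all of the CB's VA blocks, invalidate the MMU cache, and return
 * the blocks to the context's CB VA pool.
 */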
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_va_block *va_block, *tmp;

	mutex_lock(&ctx->mmu_lock);

	list_for_each_entry(va_block, &cb->va_block_list, node)
		if (hl_mmu_unmap(ctx, va_block->start, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list)))
			dev_warn_ratelimited(hdev->dev,
					"Failed to unmap CB's va 0x%llx\n",
					va_block->start);

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}
}

static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
@@ -47,6 +172,9 @@ static void cb_release(struct kref *ref)

	hl_debugfs_remove_cb(cb);

	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);

	cb_do_release(hdev, cb);
@@ -110,7 +238,7 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,

int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
-			u64 *handle)
+			bool map_cb, u64 *handle)
{
	struct hl_cb *cb;
	bool alloc_new_cb = true;
@@ -169,13 +297,26 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
	cb->ctx = ctx;
	hl_ctx_get(hdev, cb->ctx);

	if (map_cb) {
		if (ctx_id == HL_KERNEL_ASID_ID) {
			dev_err(hdev->dev,
				"CB mapping is not supported for kernel context\n");
			rc = -EINVAL;
			goto release_cb;
		}

		rc = cb_map_mem(ctx, cb);
		if (rc)
			goto release_cb;
	}

	spin_lock(&mgr->cb_lock);
	rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
	spin_unlock(&mgr->cb_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
-		goto release_cb;
+		goto unmap_mem;
	}

	cb->id = (u64) rc;
@@ -194,6 +335,9 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,

	return 0;

unmap_mem:
	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);
release_cb:
	hl_ctx_put(cb->ctx);
	cb_do_release(hdev, cb);
@@ -256,7 +400,9 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
			rc = -EINVAL;
		} else {
			rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
-					args->in.cb_size, false, &handle);
+					args->in.cb_size, false,
+					!!(args->in.flags & HL_CB_FLAGS_MAP),
+					&handle);
		}

		memset(args, 0, sizeof(*args));
@@ -442,7 +588,7 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
	int rc;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
-				internal_cb, &cb_handle);
+				internal_cb, false, &cb_handle);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate CB for the kernel driver %d\n", rc);
@@ -498,3 +644,45 @@ int hl_cb_pool_fini(struct hl_device *hdev)

	return 0;
}

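/*
 * hl_cb_va_pool_init() - create the per-context VA pool for mapped CBs,
 * backed by the ASIC's dedicated CB VA range. A no-op on ASICs that do
 * not support CB mapping.
 */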
int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!hdev->supports_cb_mapping)
		return 0;

	ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
	if (!ctx->cb_va_pool) {
		dev_err(hdev->dev,
			"Failed to create VA gen pool for CB mapping\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
			prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to VA gen pool for CB mapping\n");
		goto err_pool_destroy;
	}

	return 0;

err_pool_destroy:
	gen_pool_destroy(ctx->cb_va_pool);

	return rc;
}

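/* hl_cb_va_pool_fini() - destroy the per-context CB VA pool, if it exists. */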
void hl_cb_va_pool_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->supports_cb_mapping)
		return;

	gen_pool_destroy(ctx->cb_va_pool);
}
drivers/misc/habanalabs/common/context.c +11 −1
@@ -37,6 +37,7 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
		if ((hdev->in_debug) && (hdev->compute_ctx == ctx))
			hl_device_set_debug_mode(hdev, false);

		hl_cb_va_pool_fini(ctx);
		hl_vm_ctx_fini(ctx);
		hl_asid_free(hdev, ctx->asid);
	} else {
@@ -155,15 +156,24 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
			goto err_asid_free;
		}

		rc = hl_cb_va_pool_init(ctx);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to init VA pool for mapped CB\n");
			goto err_vm_ctx_fini;
		}

		rc = hdev->asic_funcs->ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "ctx_init failed\n");
-			goto err_vm_ctx_fini;
+			goto err_cb_va_pool_fini;
		}
	}

	return 0;

err_cb_va_pool_fini:
	hl_cb_va_pool_fini(ctx);
err_vm_ctx_fini:
	hl_vm_ctx_fini(ctx);
err_asid_free:
drivers/misc/habanalabs/common/habanalabs.h +19 −1
@@ -291,6 +291,10 @@ struct hl_mmu_properties {
 * @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register.
 * @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
 * @mmu_dram_default_page_addr: DRAM default page physical address.
 * @cb_va_start_addr: virtual start address of command buffers which are mapped
 *                    to the device's MMU.
 * @cb_va_end_addr: virtual end address of command buffers which are mapped to
 *                  the device's MMU.
 * @mmu_pgt_size: MMU page tables total size.
 * @mmu_pte_size: PTE size in MMU page tables.
 * @mmu_hop_table_size: MMU hop table size.
@@ -339,6 +343,8 @@ struct asic_fixed_properties {
	u64				pcie_aux_dbi_reg_addr;
	u64				mmu_pgt_addr;
	u64				mmu_dram_default_page_addr;
	u64				cb_va_start_addr;
	u64				cb_va_end_addr;
	u32				mmu_pgt_size;
	u32				mmu_pte_size;
	u32				mmu_hop_table_size;
@@ -421,6 +427,8 @@ struct hl_cb_mgr {
 * @lock: spinlock to protect mmap/cs flows.
 * @debugfs_list: node in debugfs list of command buffers.
 * @pool_list: node in pool list of command buffers.
 * @va_block_list: list of virtual addresses blocks of the CB if it is mapped to
 *                 the device's MMU.
 * @id: the CB's ID.
 * @kernel_address: Holds the CB's kernel virtual address.
 * @bus_address: Holds the CB's DMA address.
@@ -430,6 +438,7 @@ struct hl_cb_mgr {
 * @mmap: true if the CB is currently mmapped to user.
 * @is_pool: true if CB was acquired from the pool, false otherwise.
 * @is_internal: internally allocated
 * @is_mmu_mapped: true if the CB is mapped to the device's MMU.
 */
struct hl_cb {
	struct kref		refcount;
@@ -438,6 +447,7 @@ struct hl_cb {
	spinlock_t		lock;
	struct list_head	debugfs_list;
	struct list_head	pool_list;
	struct list_head	va_block_list;
	u64			id;
	u64			kernel_address;
	dma_addr_t		bus_address;
@@ -447,6 +457,7 @@ struct hl_cb {
	u8			mmap;
	u8			is_pool;
	u8			is_internal;
	u8			is_mmu_mapped;
};


@@ -843,6 +854,8 @@ struct hl_va_range {
 * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
 *            MMU hash or walking the PGT requires taking this lock.
 * @debugfs_list: node in debugfs list of contexts.
 * @cb_va_pool: device VA pool for command buffers which are mapped to the
 *              device's MMU.
 * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
 *			to user so user could inquire about CS. It is used as
 *			index to cs_pending array.
@@ -874,6 +887,7 @@ struct hl_ctx {
	struct mutex		mmu_lock;
	struct list_head	debugfs_list;
	struct hl_cs_counters	cs_counters;
	struct gen_pool		*cb_va_pool;
	u64			cs_sequence;
	u64			*dram_default_hops;
	spinlock_t		cs_lock;
@@ -1574,6 +1588,7 @@ struct hl_mmu_funcs {
 * @sync_stream_queue_idx: helper index for sync stream queues initialization.
 * @supports_coresight: is CoreSight supported.
 * @supports_soft_reset: is soft reset supported.
 * @supports_cb_mapping: is mapping a CB to the device's MMU supported.
 */
struct hl_device {
	struct pci_dev			*pdev;
@@ -1673,6 +1688,7 @@ struct hl_device {
	u8				sync_stream_queue_idx;
	u8				supports_coresight;
	u8				supports_soft_reset;
	u8				supports_cb_mapping;

	/* Parameters for bring-up */
	u8				mmu_enable;
@@ -1840,7 +1856,7 @@ void hl_hwmon_fini(struct hl_device *hdev);

int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
-			u64 *handle);
+			bool map_cb, u64 *handle);
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
struct hl_cb *hl_cb_get(struct hl_device *hdev,	struct hl_cb_mgr *mgr,
@@ -1852,6 +1868,8 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
					bool internal_cb);
int hl_cb_pool_init(struct hl_device *hdev);
int hl_cb_pool_fini(struct hl_device *hdev);
int hl_cb_va_pool_init(struct hl_ctx *ctx);
void hl_cb_va_pool_fini(struct hl_ctx *ctx);

void hl_cs_rollback_all(struct hl_device *hdev);
struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
drivers/misc/habanalabs/gaudi/gaudi.c +2 −2
@@ -4115,7 +4115,7 @@ static int gaudi_parse_cb_mmu(struct hl_device *hdev,
			sizeof(struct packet_msg_prot) * 2;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
-				parser->patched_cb_size, false,
+				parser->patched_cb_size, false, false,
				&patched_cb_handle);

	if (rc) {
@@ -4189,7 +4189,7 @@ static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
		goto free_userptr;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
-				parser->patched_cb_size, false,
+				parser->patched_cb_size, false, false,
				&patched_cb_handle);
	if (rc) {
		dev_err(hdev->dev,
drivers/misc/habanalabs/goya/goya.c +2 −2
@@ -3811,7 +3811,7 @@ static int goya_parse_cb_mmu(struct hl_device *hdev,
			sizeof(struct packet_msg_prot) * 2;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
-				parser->patched_cb_size, false,
+				parser->patched_cb_size, false, false,
				&patched_cb_handle);

	if (rc) {
@@ -3885,7 +3885,7 @@ static int goya_parse_cb_no_mmu(struct hl_device *hdev,
		goto free_userptr;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
-				parser->patched_cb_size, false,
+				parser->patched_cb_size, false, false,
				&patched_cb_handle);
	if (rc) {
		dev_err(hdev->dev,