Unverified Commit 648197ad authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!2496 uacce: some bugfixes and cleanups

parents f85620a9 d4a3dce0
Loading
Loading
Loading
Loading
+0 −12
Original line number Diff line number Diff line
@@ -2571,17 +2571,6 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
	return -EINVAL;
}

/* Report the uacce-visible state of the QM: faulty once it has stopped. */
static enum uacce_dev_state hisi_qm_get_state(struct uacce_device *uacce)
{
	struct hisi_qm *qm = uacce->priv;

	/* A stopped QM cannot service queues, so surface it as an error. */
	if (atomic_read(&qm->status.flags) == QM_STOP)
		return UACCE_DEV_ERR;

	return UACCE_DEV_NORMAL;
}
static void qm_uacce_api_ver_init(struct hisi_qm *qm)
{
	struct uacce_device *uacce = qm->uacce;
@@ -2758,7 +2747,6 @@ static const struct uacce_ops uacce_qm_ops = {
	.stop_queue = hisi_qm_uacce_stop_queue,
	.mmap = hisi_qm_uacce_mmap,
	.ioctl = hisi_qm_uacce_ioctl,
	.get_dev_state = hisi_qm_get_state,
	.is_q_updated = hisi_qm_is_q_updated,
	.get_isolate_state = hisi_qm_get_isolate_state,
	.isolate_err_threshold_write = hisi_qm_isolate_threshold_write,
+27 −60
Original line number Diff line number Diff line
@@ -18,38 +18,6 @@ static struct uacce_qfile_region noiommu_ss_default_qfr = {
	.type	=	UACCE_QFRT_SS,
};

/*
 * class_for_each_device() callback: match the uacce class device whose
 * parent equals *data, and return it through the same pointer.
 */
static int cdev_get(struct device *dev, void *data)
{
	struct device **parent = data;
	struct uacce_device *uacce = container_of(dev, struct uacce_device, dev);

	/* Not a child of the parent we are looking for: keep iterating. */
	if (uacce->parent != *parent)
		return 0;

	/* Found it: hand the class device back and stop the iteration. */
	*parent = dev;
	return 1;
}

/**
 * dev_to_uacce - look up the uacce device registered on top of a parent
 * @dev: the parent device
 *
 * Walks the uacce class and returns the uacce_device whose parent is
 * @dev, or NULL when no such device is registered.
 */
struct uacce_device *dev_to_uacce(struct device *dev)
{
	struct device **tdev = &dev;

	/* On a match, cdev_get() stores the class device back into *tdev. */
	if (!class_for_each_device(uacce_class, NULL, tdev, cdev_get))
		return NULL;

	return container_of(*tdev, struct uacce_device, dev);
}
EXPORT_SYMBOL_GPL(dev_to_uacce);

/*
 * If the parent driver or the device disappears, the queue state is invalid and
 * ops are not usable anymore.
@@ -199,11 +167,9 @@ static void uacce_free_dma_buffers(struct uacce_queue *q)
	struct device *pdev = q->uacce->parent;
	int i = 0;

	if (module_refcount(pdev->driver->owner) > 0)
		module_put(pdev->driver->owner);

	if (!qfr->dma_list)
		return;

	while (i < qfr->dma_list[0].total_num) {
		WARN_ON(!qfr->dma_list[i].size || !qfr->dma_list[i].dma);
		dev_dbg(pdev, "free dma qfr (index = %d)\n", i);
@@ -368,18 +334,19 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
static int uacce_fops_release(struct inode *inode, struct file *filep)
{
	struct uacce_queue *q = filep->private_data;
	struct uacce_qfile_region *ss = q->qfrs[UACCE_QFRT_SS];
	struct uacce_device *uacce = q->uacce;
	struct uacce_qfile_region *ss;

	mutex_lock(&uacce->mutex);
	uacce_put_queue(q);
	uacce_unbind_queue(q);
	list_del(&q->list);
	mutex_unlock(&uacce->mutex);
	ss = q->qfrs[UACCE_QFRT_SS];
	if (ss && ss != &noiommu_ss_default_qfr) {
		uacce_free_dma_buffers(q);
		kfree(ss);
	}
	list_del(&q->list);
	mutex_unlock(&uacce->mutex);
	kfree(q);

	return 0;
@@ -403,14 +370,22 @@ static void uacce_vma_close(struct vm_area_struct *vma)

	if (qfr->type == UACCE_QFRT_SS &&
	    atomic_read(&current->active_mm->mm_users) > 0) {
		/*
		 * uacce_vma_close() and uacce_remove() may be executed concurrently.
		 * To avoid accessing the same address at the same time, takes the uacce->mutex.
		 */
		mutex_lock(&uacce->mutex);
		if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
			uacce->ops->stop_queue(q);
		uacce_free_dma_buffers(q);
		kfree(qfr);
		q->qfrs[vma->vm_pgoff] = NULL;
	} else if (qfr->type != UACCE_QFRT_SS) {
		mutex_unlock(&uacce->mutex);
		kfree(qfr);
	} else if (qfr->type != UACCE_QFRT_SS) {
		mutex_lock(&q->mutex);
		q->qfrs[vma->vm_pgoff] = NULL;
		mutex_unlock(&q->mutex);
		kfree(qfr);
	}
}

@@ -522,7 +497,6 @@ static int uacce_alloc_dma_buffers(struct uacce_queue *q,
	if (!slice)
		return -ENOMEM;

	(void)try_module_get(pdev->driver->owner);
	qfr->dma_list = slice;
	for (i = 0; i < ss_num; i++) {
		if (start + max_size > vma->vm_end)
@@ -538,8 +512,8 @@ static int uacce_alloc_dma_buffers(struct uacce_queue *q,
			dev_err(pdev, "get dma slice(sz = %lu,slice num = %d) fail!\n",
			size, i);
			slice[0].total_num = i;
			uacce_free_dma_buffers(q);
			return -ENOMEM;
			ret = -ENOMEM;
			goto free_buffer;
		}
		slice[i].size = (size + PAGE_SIZE - 1) & PAGE_MASK;
		slice[i].total_num = ss_num;
@@ -550,11 +524,15 @@ static int uacce_alloc_dma_buffers(struct uacce_queue *q,
				      &slice[ss_num]);
	if (ret) {
		dev_err(pdev, "failed to sort dma buffers.\n");
		uacce_free_dma_buffers(q);
		return ret;
		goto free_buffer;
	}

	return 0;

free_buffer:
	uacce_free_dma_buffers(q);

	return ret;
}

static int uacce_mmap_dma_buffers(struct uacce_queue *q,
@@ -817,14 +795,6 @@ static ssize_t isolate_strategy_store(struct device *dev, struct device_attribut
	return count;
}

static ssize_t dev_state_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct uacce_device *uacce = to_uacce_device(dev);

	return sysfs_emit(buf, "%d\n", uacce->ops->get_dev_state(uacce));
}

static ssize_t node_id_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
@@ -861,7 +831,6 @@ static DEVICE_ATTR_RO(region_mmio_size);
static DEVICE_ATTR_RO(region_dus_size);
static DEVICE_ATTR_RO(isolate);
static DEVICE_ATTR_RW(isolate_strategy);
static DEVICE_ATTR_RO(dev_state);
static DEVICE_ATTR_RO(numa_distance);

static struct attribute *uacce_dev_attrs[] = {
@@ -874,7 +843,6 @@ static struct attribute *uacce_dev_attrs[] = {
	&dev_attr_region_dus_size.attr,
	&dev_attr_isolate.attr,
	&dev_attr_isolate_strategy.attr,
	&dev_attr_dev_state.attr,
	&dev_attr_numa_distance.attr,
	NULL,
};
@@ -1047,6 +1015,7 @@ void uacce_remove(struct uacce_device *uacce)
	mutex_lock(&uacce->mutex);
	/* ensure no open queue remains */
	list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
		struct uacce_qfile_region *ss = q->qfrs[UACCE_QFRT_SS];
		/*
		 * Taking q->mutex ensures that fops do not use the defunct
		 * uacce->ops after the queue is disabled.
@@ -1061,6 +1030,8 @@ void uacce_remove(struct uacce_device *uacce)
		 * access the mmaped area while parent device is already removed
		 */
		unmap_mapping_range(q->mapping, 0, 0, 1);
		if (ss && ss != &noiommu_ss_default_qfr)
			uacce_free_dma_buffers(q);
	}

	/* disable sva now since no opened queues */
@@ -1090,12 +1061,8 @@ static int __init uacce_init(void)

	ret = alloc_chrdev_region(&uacce_devt, 0, MINORMASK, UACCE_NAME);
	if (ret)
		goto destroy_class;

	return 0;

destroy_class:
		class_destroy(uacce_class);

	return ret;
}

+0 −17
Original line number Diff line number Diff line
@@ -17,11 +17,6 @@
struct uacce_queue;
struct uacce_device;

struct uacce_err_isolate {
	u32 hw_err_isolate_hz;	/* user cfg freq which triggers isolation */
	atomic_t is_isolate;
};

struct uacce_dma_slice {
	void *kaddr;	/* kernel address for ss */
	dma_addr_t dma;	/* dma address, if created by dma api */
@@ -52,14 +47,11 @@ struct uacce_qfile_region {
 * @start_queue: make the queue start work after get_queue
 * @stop_queue: make the queue stop work before put_queue
 * @is_q_updated: check whether the task is finished
 * @mask_notify: mask the task irq of queue
 * @mmap: mmap addresses of queue to user space
 * @ioctl: ioctl for user space users of the queue
 * @get_isolate_state: get the device state after set the isolate strategy
 * @isolate_err_threshold_write: stored the isolate error threshold to the device
 * @isolate_err_threshold_read: read the isolate error threshold value from the device
 * @reset: reset the WD device
 * @reset_queue: reset the queue
 */
struct uacce_ops {
	int (*get_available_instances)(struct uacce_device *uacce);
@@ -68,7 +60,6 @@ struct uacce_ops {
	void (*put_queue)(struct uacce_queue *q);
	int (*start_queue)(struct uacce_queue *q);
	void (*stop_queue)(struct uacce_queue *q);
	void (*dump_queue)(const struct uacce_queue *q);
	int (*is_q_updated)(struct uacce_queue *q);
	int (*mmap)(struct uacce_queue *q, struct vm_area_struct *vma,
		    struct uacce_qfile_region *qfr);
@@ -77,7 +68,6 @@ struct uacce_ops {
	enum uacce_dev_state (*get_isolate_state)(struct uacce_device *uacce);
	int (*isolate_err_threshold_write)(struct uacce_device *uacce, u32 num);
	u32 (*isolate_err_threshold_read)(struct uacce_device *uacce);
	enum uacce_dev_state (*get_dev_state)(struct uacce_device *uacce);
};

/**
@@ -163,7 +153,6 @@ struct uacce_device {
	struct device dev;
	struct mutex mutex;
	void *priv;
	struct uacce_err_isolate *isolate;
	struct list_head queues;
};

@@ -173,7 +162,6 @@ struct uacce_device *uacce_alloc(struct device *parent,
				 struct uacce_interface *interface);
int uacce_register(struct uacce_device *uacce);
void uacce_remove(struct uacce_device *uacce);
struct uacce_device *dev_to_uacce(struct device *dev);
void uacce_wake_up(struct uacce_queue *q);
#else /* CONFIG_UACCE */

@@ -190,11 +178,6 @@ static inline int uacce_register(struct uacce_device *uacce)
}

static inline void uacce_remove(struct uacce_device *uacce) {}

static inline struct uacce_device *dev_to_uacce(struct device *dev)
{
	return NULL;
}
static inline void uacce_wake_up(struct uacce_queue *q) {}
#endif /* CONFIG_UACCE */