Commit 770f75eb authored by zhangyuyang's avatar zhangyuyang
Browse files

drivers:misc:sdma-dae remove lock while pausing channels

kunpeng inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/


CVE: NA

----------------------------------------------------------------------

1. Remove the mutex lock in the mmu-release pause path to avoid soft lockup problems.

Fixes: f8eeb398 ("drivers: misc: sdma-dae: support channel management")
Signed-off-by: zhangyuyang <zhangyuyang31@huawei.com>
parent d7de0e51
Loading
Loading
Loading
Loading
+24 −35
Original line number Diff line number Diff line
@@ -248,57 +248,49 @@ static bool sdma_wait_cq_writeback(struct hisi_sdma_channel *pchannel)
	return (cnt <= SDMA_POLL_TIMEOUT);
}

static void sdma_pause_channel(struct hisi_sdma_device *psdma_dev)
static void sdma_pause_single_channel(struct hisi_sdma_channel *pchannel,
				      struct hisi_sdma_device *psdma_dev)
{
	struct hisi_sdma_channel *pchannel;
	bool flag;
	int i;
	u16 idx = pchannel->idx;

	for (i = 0; i < HISI_SDMA_DEFAULT_CHANNEL_NUM; i++) {
		pchannel = psdma_dev->channels + i;
		if (sdma_wait_hardware_done(pchannel) == false) {
			pr_warn("SDMA %u chn %d hardware not finish all sqes!\n",
				psdma_dev->idx, i);
			continue;
		}
	if (sdma_wait_hardware_done(pchannel) == false)
		pr_warn("SDMA %u chn %hu hardware not finish all sqes!\n",
			psdma_dev->idx, idx);

	sdma_channel_set_pause(pchannel);
		flag = sdma_wait_cq_writeback(pchannel);
		if (flag)
	if (sdma_wait_cq_writeback(pchannel))
		sdma_channel_reset_sq_cq(pchannel);
	else
			pr_warn("SDMA %u chn %d hardware not write back all cqes!\n",
				psdma_dev->idx, i);
	}
		pr_warn("SDMA %u chn %hu hardware not write back all cqes!\n",
			psdma_dev->idx, idx);

	return;
}

static void sdma_wait_channel_quiescent(struct hisi_sdma_device *psdma_dev)
static void sdma_pause_channels(struct hisi_sdma_device *psdma_dev)
{
	struct hisi_sdma_channel *pchannel;
	bool flag;
	int i;

	for (i = 0; i < HISI_SDMA_DEFAULT_CHANNEL_NUM; i++) {
		pchannel = psdma_dev->channels + i;
		if (sdma_channel_is_quiescent(pchannel))
			continue;
		sdma_pause_single_channel(pchannel, psdma_dev);
	}

		if (sdma_channel_is_paused(pchannel)) {
			sdma_channel_reset_sq_cq(pchannel);
			continue;
	return;
}

		if (sdma_wait_hardware_done(pchannel) == false) {
			pr_warn("SDMA %u chn %d hardware not finish all sqes!\n",
				psdma_dev->idx, i);
static void sdma_wait_channel_quiescent(struct hisi_sdma_device *psdma_dev)
{
	struct hisi_sdma_channel *pchannel;
	int i;

	for (i = 0; i < HISI_SDMA_DEFAULT_CHANNEL_NUM; i++) {
		pchannel = psdma_dev->channels + i;
		if (sdma_channel_is_quiescent(pchannel))
			continue;
		}

		sdma_channel_set_pause(pchannel);
		flag = sdma_wait_cq_writeback(pchannel);
		if (flag)
		if (sdma_wait_cq_writeback(pchannel))
			sdma_channel_reset_sq_cq(pchannel);
		else
			pr_warn("SDMA %u chn %d hardware not write back all cqes!\n",
@@ -338,13 +330,12 @@ static void sdma_mmu_release_pause(struct mmu_notifier *mn, struct mm_struct *mm
	if (!sdma_mn->data)
		return;

	mutex_lock(g_info.mutex_lock);
	if (atomic_read(&exit_processes) == 0) {
		atomic_set(&exit_processes, 1);
		pr_warn("SDMA exit exceptionally, stop SDMA tasks before mm exit.\n");
		for (i = 0; i < g_info.core_dev->sdma_device_num; i++) {
			psdma_dev = g_info.core_dev->sdma_devices[i];
			sdma_pause_channel(psdma_dev);
			sdma_pause_channels(psdma_dev);
		}
	} else {
		for (i = 0; i < g_info.core_dev->sdma_device_num; i++) {
@@ -352,7 +343,6 @@ static void sdma_mmu_release_pause(struct mmu_notifier *mn, struct mm_struct *mm
			sdma_wait_channel_quiescent(psdma_dev);
		}
	}
	mutex_unlock(g_info.mutex_lock);

	return;
}
@@ -578,9 +568,8 @@ static int __do_sdma_open(struct hisi_sdma_device *psdma_dev, struct file *file)
	}

	ret = sdma_pause_mmu_handler(current->mm, data);
	if (ret != 0) {
	if (ret != 0)
		goto sva_unbind;
	}

	atomic_add(1, &ttl_processes);