Commit d7de0e51 authored by zhangyuyang's avatar zhangyuyang
Browse files

drivers:misc:sdma-dae add mmu_notifier

kunpeng inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/


CVE: NA

----------------------------------------------------------------------

1. Add an mmu_notifier to avoid the SDMA chip register overflow problem.

Fixes: f8eeb398 ("drivers: misc: sdma-dae: support channel management")
Signed-off-by: default avatarzhangyuyang <zhangyuyang31@huawei.com>
parent 4fe4597a
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
config SDMA_DAE
	tristate "SDMA-DAE driver for SDMA hardware"
	depends on ARM64 && NUMA
	select MMU_NOTIFIER
	default n
	help
	  Kernel driver of SDMA hardware.
+361 −2
Original line number Diff line number Diff line
@@ -6,13 +6,20 @@
#include <linux/platform_device.h>
#include <linux/sort.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/atomic.h>

#include "sdma_hal.h"
#include "sdma_umem.h"
#include "sdma_auth.h"

#define mn_to_sdma(mn)	container_of(mn, struct hisi_sdma_mn, mn)

static struct hisi_sdma_global_info g_info;

static atomic_t ttl_processes;
static atomic_t exit_processes;

struct hisi_sdma_channel_list {
	struct list_head chn_list;
	u32 chn_idx;
@@ -27,6 +34,14 @@ struct file_open_data {
	struct list_head share_chn_list;
};

struct hisi_sdma_mn {
	uint32_t pid;
	refcount_t refs;
	struct mmu_notifier mn;
	struct file_open_data *data;
	struct list_head list;
};

struct hisi_sdma_numa_domain {
	int idx;
	int pxm;
@@ -200,6 +215,327 @@ void sdma_clear_ida_ref(struct hisi_sdma_channel *pchannel)
	spin_unlock(&pchannel->owner_chn_lock);
}

static bool sdma_wait_hardware_done(struct hisi_sdma_channel *pchannel)
{
	u32 sq_tail, sq_head;
	u32 cnt = 0;

	sq_head = sdma_channel_get_sq_head(pchannel);
	sq_tail = sdma_channel_get_sq_tail(pchannel);
	while (sq_head != sq_tail && cnt <= SDMA_POLL_TIMEOUT) {
		sq_head = sdma_channel_get_sq_head(pchannel);
		sq_tail = sdma_channel_get_sq_tail(pchannel);
		cnt++;
		msleep(SDMA_POLL_DELAY);
	}

	return (cnt <= SDMA_POLL_TIMEOUT);
}

static bool sdma_wait_cq_writeback(struct hisi_sdma_channel *pchannel)
{
	u32 cq_tail, sq_tail;
	u32 cnt = 0;

	cq_tail = sdma_channel_get_cq_tail(pchannel);
	sq_tail = sdma_channel_get_sq_tail(pchannel);
	while (cq_tail != sq_tail && cnt <= SDMA_POLL_TIMEOUT) {
		cq_tail = sdma_channel_get_cq_tail(pchannel);
		cnt++;
		msleep(SDMA_POLL_DELAY);
	}

	return (cnt <= SDMA_POLL_TIMEOUT);
}

/*
 * Pause every channel of @psdma_dev: wait for outstanding SQEs to finish,
 * assert the pause bit, then wait for CQE write-back before resetting the
 * channel's SQ/CQ state. Channels that fail either wait only get a
 * warning and are left as-is.
 */
static void sdma_pause_channel(struct hisi_sdma_device *psdma_dev)
{
	int chn;

	for (chn = 0; chn < HISI_SDMA_DEFAULT_CHANNEL_NUM; chn++) {
		struct hisi_sdma_channel *chan = psdma_dev->channels + chn;

		if (!sdma_wait_hardware_done(chan)) {
			pr_warn("SDMA %u chn %d hardware not finish all sqes!\n",
				psdma_dev->idx, chn);
			continue;
		}

		sdma_channel_set_pause(chan);
		if (sdma_wait_cq_writeback(chan))
			sdma_channel_reset_sq_cq(chan);
		else
			pr_warn("SDMA %u chn %d hardware not write back all cqes!\n",
				psdma_dev->idx, chn);
	}
}

/*
 * Bring every channel of @psdma_dev to a quiescent state. Channels that
 * are already quiescent are skipped; channels already paused just get
 * their SQ/CQ reset; everything else goes through the same
 * drain / pause / write-back / reset sequence as sdma_pause_channel().
 */
static void sdma_wait_channel_quiescent(struct hisi_sdma_device *psdma_dev)
{
	int chn;

	for (chn = 0; chn < HISI_SDMA_DEFAULT_CHANNEL_NUM; chn++) {
		struct hisi_sdma_channel *chan = psdma_dev->channels + chn;

		if (sdma_channel_is_quiescent(chan))
			continue;

		if (sdma_channel_is_paused(chan)) {
			sdma_channel_reset_sq_cq(chan);
			continue;
		}

		if (!sdma_wait_hardware_done(chan)) {
			pr_warn("SDMA %u chn %d hardware not finish all sqes!\n",
				psdma_dev->idx, chn);
			continue;
		}

		sdma_channel_set_pause(chan);
		if (sdma_wait_cq_writeback(chan))
			sdma_channel_reset_sq_cq(chan);
		else
			pr_warn("SDMA %u chn %d hardware not write back all cqes!\n",
				psdma_dev->idx, chn);
	}
}

/*
 * Resume every paused channel of @psdma_dev. Non-paused channels are
 * warned about and skipped; a paused but non-quiescent channel gets its
 * SQ/CQ reset first, and the resume bit is written only once the channel
 * is both paused and quiescent.
 */
static void sdma_resume_channel(struct hisi_sdma_device *psdma_dev)
{
	int chn;

	for (chn = 0; chn < HISI_SDMA_DEFAULT_CHANNEL_NUM; chn++) {
		struct hisi_sdma_channel *chan = psdma_dev->channels + chn;

		if (!sdma_channel_is_paused(chan)) {
			pr_warn("SDMA %u chn %d not paused\n", psdma_dev->idx, chn);
			continue;
		}
		if (!sdma_channel_is_quiescent(chan))
			sdma_channel_reset_sq_cq(chan);
		if (sdma_channel_is_paused(chan) && sdma_channel_is_quiescent(chan))
			sdma_channel_write_resume(chan);
	}
}

/*
 * mmu_notifier ->release hook for the pause-side notifier.
 *
 * Runs when a process that registered this notifier has its mm torn down.
 * Under g_info.mutex_lock: the first exiting process (exit_processes
 * 0 -> 1) pauses all channels on every SDMA device so the hardware stops
 * referencing the dying address space; any later exiting process instead
 * waits for the channels to become quiescent.
 */
static void sdma_mmu_release_pause(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hisi_sdma_device *psdma_dev;
	struct hisi_sdma_mn *sdma_mn;
	int i;

	sdma_mn = mn_to_sdma(mn);
	/* Defensive: sdma_pause_mmu_handler() always sets ->data before registering */
	if (!sdma_mn->data)
		return;

	mutex_lock(g_info.mutex_lock);
	if (atomic_read(&exit_processes) == 0) {
		/* First process to exit: stop all SDMA traffic device-wide */
		atomic_set(&exit_processes, 1);
		pr_warn("SDMA exit exceptionally, stop SDMA tasks before mm exit.\n");
		for (i = 0; i < g_info.core_dev->sdma_device_num; i++) {
			psdma_dev = g_info.core_dev->sdma_devices[i];
			sdma_pause_channel(psdma_dev);
		}
	} else {
		/* Channels already paused by an earlier exit; just wait for quiescence */
		for (i = 0; i < g_info.core_dev->sdma_device_num; i++) {
			psdma_dev = g_info.core_dev->sdma_devices[i];
			sdma_wait_channel_quiescent(psdma_dev);
		}
	}
	mutex_unlock(g_info.mutex_lock);

	return;
}

/*
 * mmu_notifier ->release hook for the resume-side notifier.
 *
 * Subtracts this notifier's reference count from ttl_processes; once the
 * last tracked process has exited (ttl_processes hits 0), every SDMA
 * device's channels are resumed and exit_processes is rearmed to 0.
 *
 * NOTE(review): ttl_processes is decremented before the !data early
 * return, and this hook runs without g_info.mutex_lock while the pause
 * hook holds it — confirm the intended ordering/serialization between
 * the two release paths.
 */
static void sdma_mmu_release_resume(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hisi_sdma_device *psdma_dev;
	struct hisi_sdma_mn *sdma_mn;
	int refcount;
	int i;

	sdma_mn = mn_to_sdma(mn);
	refcount = refcount_read(&sdma_mn->refs);
	atomic_sub(refcount, &ttl_processes);
	/* Defensive: sdma_resume_mmu_handler() always sets ->data before registering */
	if (!sdma_mn->data)
		return;

	if (atomic_read(&ttl_processes) == 0) {
		/* Last SDMA user gone: restart channels and rearm the exit flag */
		pr_warn("Now resume SDMA channels after mm exit\n");
		for (i = 0; i < g_info.core_dev->sdma_device_num; i++) {
			psdma_dev = g_info.core_dev->sdma_devices[i];
			sdma_resume_channel(psdma_dev);
		}
		atomic_set(&exit_processes, 0);
	}

	return;
}

/* ->free_notifier callback: release the containing hisi_sdma_mn. */
static void sdma_mmu_notifier_free(struct mmu_notifier *mn)
{
	struct hisi_sdma_mn *sdma_mn = mn_to_sdma(mn);

	kfree(sdma_mn);
}

/* Pause-side notifier ops: stop SDMA channels when the mm is released. */
static const struct mmu_notifier_ops sdma_pause_mmu_notifier_ops = {
	.release	= sdma_mmu_release_pause,
	.free_notifier	= sdma_mmu_notifier_free,
};

/* Resume-side notifier ops: restart channels once the last user exits. */
static const struct mmu_notifier_ops sdma_resume_mmu_notifier_ops = {
	.release	= sdma_mmu_release_resume,
	.free_notifier	= sdma_mmu_notifier_free,
};

/*
 * Look up the resume-list notifier registered for @mm, taking an extra
 * reference on it when found. Returns NULL if no entry matches.
 * All callers in this file hold g_info.mutex_lock around the walk.
 */
static struct hisi_sdma_mn *search_resume_mmu_notifier(struct mm_struct *mm)
{
	struct hisi_sdma_mn *cursor;

	list_for_each_entry(cursor, &g_info.sdma_resume_mm_list, list) {
		if (cursor->mn.mm != mm)
			continue;
		refcount_inc(&cursor->refs);
		return cursor;
	}

	return NULL;
}

/*
 * Look up the pause-list notifier registered for @mm, taking an extra
 * reference on it when found. Returns NULL if no entry matches.
 * All callers in this file hold g_info.mutex_lock around the walk.
 */
static struct hisi_sdma_mn *search_pause_mmu_notifier(struct mm_struct *mm)
{
	struct hisi_sdma_mn *cursor;

	list_for_each_entry(cursor, &g_info.sdma_pause_mm_list, list) {
		if (cursor->mn.mm != mm)
			continue;
		refcount_inc(&cursor->refs);
		return cursor;
	}

	return NULL;
}

/*
 * Ensure a pause-side mmu_notifier exists for @mm. If one is already on
 * sdma_pause_mm_list its refcount is bumped; otherwise a new notifier is
 * allocated, registered on @mm, and added to the list.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * mmu_notifier_register().
 */
static int sdma_pause_mmu_handler(struct mm_struct *mm, struct file_open_data *data)
{
	struct hisi_sdma_mn *entry;
	int err = 0;

	mutex_lock(g_info.mutex_lock);

	/* Already tracked? The search bumps the refcount for us. */
	if (search_pause_mmu_notifier(mm))
		goto out_unlock;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto out_unlock;
	}

	refcount_set(&entry->refs, 1);
	entry->pid = current->tgid;
	entry->data = data;
	entry->mn.ops = &sdma_pause_mmu_notifier_ops;

	err = mmu_notifier_register(&entry->mn, mm);
	if (err) {
		kfree(entry);
		goto out_unlock;
	}

	list_add(&entry->list, &g_info.sdma_pause_mm_list);

out_unlock:
	mutex_unlock(g_info.mutex_lock);
	return err;
}

/*
 * Ensure a resume-side mmu_notifier exists for @mm. If one is already on
 * sdma_resume_mm_list its refcount is bumped; otherwise a new notifier is
 * allocated, registered on @mm, and added to the list.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * mmu_notifier_register().
 */
static int sdma_resume_mmu_handler(struct mm_struct *mm, struct file_open_data *data)
{
	struct hisi_sdma_mn *sdma_mn;
	int ret = 0;

	mutex_lock(g_info.mutex_lock);
	sdma_mn = search_resume_mmu_notifier(mm);
	if (sdma_mn) {
		mutex_unlock(g_info.mutex_lock);
		return ret;
	}

	sdma_mn = kzalloc(sizeof(*sdma_mn), GFP_KERNEL);
	if (!sdma_mn) {
		mutex_unlock(g_info.mutex_lock);
		return -ENOMEM;
	}

	refcount_set(&sdma_mn->refs, 1);
	sdma_mn->pid = current->tgid;
	sdma_mn->data = data;
	sdma_mn->mn.ops = &sdma_resume_mmu_notifier_ops;
	/*
	 * Register on @mm, not current->mm: the list lookup above keys on
	 * @mm and the pause-side handler registers on @mm too. Using
	 * current->mm could attach the notifier to a different mm than the
	 * one recorded in sdma_resume_mm_list.
	 */
	ret = mmu_notifier_register(&sdma_mn->mn, mm);
	if (ret) {
		mutex_unlock(g_info.mutex_lock);
		kfree(sdma_mn);
		return ret;
	}

	list_add(&sdma_mn->list, &g_info.sdma_resume_mm_list);
	mutex_unlock(g_info.mutex_lock);

	return ret;
}

/*
 * Drop one reference on @sdma_mn; when the last reference goes away,
 * unlink it from its list and hand the notifier back to the mmu_notifier
 * core (which invokes ->free_notifier to free the memory).
 */
static void sdma_put_mmu_notifier(struct hisi_sdma_mn *sdma_mn)
{
	if (refcount_dec_and_test(&sdma_mn->refs)) {
		list_del(&sdma_mn->list);
		mmu_notifier_put(&sdma_mn->mn);
	}
}

/* Drop the calling process' (tgid-matched) reference on its resume-list notifier. */
static void sdma_put_resume_mmu_notifier(void)
{
	struct hisi_sdma_mn *entry;

	mutex_lock(g_info.mutex_lock);
	list_for_each_entry(entry, &g_info.sdma_resume_mm_list, list) {
		if (entry->pid != current->tgid)
			continue;
		sdma_put_mmu_notifier(entry);
		break;
	}
	mutex_unlock(g_info.mutex_lock);
}

/* Drop the calling process' (tgid-matched) reference on its pause-list notifier. */
static void sdma_put_pause_mmu_notifier(void)
{
	struct hisi_sdma_mn *entry;

	mutex_lock(g_info.mutex_lock);
	list_for_each_entry(entry, &g_info.sdma_pause_mm_list, list) {
		if (entry->pid != current->tgid)
			continue;
		sdma_put_mmu_notifier(entry);
		break;
	}
	mutex_unlock(g_info.mutex_lock);
}

static int __do_sdma_open(struct hisi_sdma_device *psdma_dev, struct file *file)
{
	struct file_open_data *data;
@@ -224,11 +560,15 @@ static int __do_sdma_open(struct hisi_sdma_device *psdma_dev, struct file *file)
		goto free_pid_ref_ht;
	}

	ret = sdma_resume_mmu_handler(current->mm, data);
	if (ret != 0)
		goto free_privt_data;

	handle = iommu_sva_bind_device(&psdma_dev->pdev->dev, current->mm, NULL);
	if (IS_ERR(handle)) {
		dev_err(&psdma_dev->pdev->dev, "failed to bind sva, %ld\n", PTR_ERR(handle));
		ret = (int)PTR_ERR(handle);
		goto free_privt_data;
		goto mmu_resume_unreg;
	}

	pasid = iommu_sva_get_pasid(handle);
@@ -237,6 +577,13 @@ static int __do_sdma_open(struct hisi_sdma_device *psdma_dev, struct file *file)
		goto sva_unbind;
	}

	ret = sdma_pause_mmu_handler(current->mm, data);
	if (ret != 0) {
		goto sva_unbind;
	}

	atomic_add(1, &ttl_processes);

	data->ida = id;
	data->pasid = pasid;
	data->psdma_dev = psdma_dev;
@@ -250,6 +597,8 @@ static int __do_sdma_open(struct hisi_sdma_device *psdma_dev, struct file *file)

sva_unbind:
	iommu_sva_unbind_device(handle);
mmu_resume_unreg:
	sdma_put_resume_mmu_notifier();
free_privt_data:
	kfree(data);
free_pid_ref_ht:
@@ -1169,8 +1518,12 @@ static int sdma_dev_release(struct inode *inode SDMA_UNUSED, struct file *file)
	}
	spin_unlock(&pdev->channel_lock);

	sdma_put_pause_mmu_notifier();
	if (data->handle)
		iommu_sva_unbind_device(data->handle);
	sdma_put_resume_mmu_notifier();
	if (current->mm)
		atomic_sub(1, &ttl_processes);

	sdma_hash_free_entry(data->ida);
	sdma_del_pid_ref(pdev, pid);
@@ -1313,10 +1666,16 @@ void sdma_cdev_init(struct cdev *cdev)
}

void sdma_info_sync_cdev(struct hisi_sdma_core_device *p, u32 *share_chns, struct ida *fd_ida,
			 bool *safe_mode)
			 bool *safe_mode, struct mutex *mutex_lock)
{
	g_info.core_dev = p;
	g_info.fd_ida = fd_ida;
	g_info.share_chns = share_chns;
	g_info.sdma_mode = safe_mode;
	g_info.mutex_lock = mutex_lock;
	INIT_LIST_HEAD(&g_info.sdma_pause_mm_list);
	INIT_LIST_HEAD(&g_info.sdma_resume_mm_list);

	atomic_set(&ttl_processes, 0);
	atomic_set(&exit_processes, 0);
}
+32 −1
Original line number Diff line number Diff line
@@ -8,6 +8,8 @@
#include <linux/types.h>
#include <linux/hashtable.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mmu_notifier.h>

#include "hisi_sdma.h"
#include "sdma_reg.h"
@@ -18,6 +20,26 @@
#define ALIGN_NUM		1
#define HISI_SDMA_HAL_HASH_BUCKETS_BITS 8

/* HISI_SDMA poll timeout values */
#define SDMA_POLL_ERR_TIMEOUT	110
#define SDMA_POLL_DELAY		1
#define SDMA_POLL_TIMEOUT	80

/*
 * Busy-poll a 32-bit MMIO register at @_addr until @_cond holds or
 * @_timeout_us elapses, delaying @_delay_us (via mdelay) between reads.
 * Evaluates to 0 on success or -SDMA_POLL_ERR_TIMEOUT on timeout; in
 * both cases @_val holds the most recent value read (a final readl is
 * done after the loop).
 * NOTE(review): arguments are expanded multiple times — pass only
 * side-effect-free expressions for _addr, _val and _cond.
 */
#define HISI_SDMA_IO_READ32_POLL_TIMEOUT(_addr, _val, _cond, _delay_us, _timeout_us) \
	({ \
		uint32_t __timeout = 0; \
		uint32_t __delay = (_delay_us); \
		while (__timeout < (_timeout_us)) { \
			(_val) = readl(_addr); \
			if (_cond) \
				break; \
			__timeout += (__delay); \
			mdelay(__delay); \
		} \
		(_val) = readl(_addr); \
		(_cond) ? 0 : -SDMA_POLL_ERR_TIMEOUT; \
	})

/**
 * struct hisi_sdma_channel - Information about one channel in the SDMA device
 * @idx: SDMA channel's ID
@@ -108,14 +130,18 @@ struct hisi_sdma_global_info {
	bool *sdma_mode;
	struct hisi_sdma_core_device *core_dev;
	struct ida *fd_ida;
	struct mutex *mutex_lock;
	struct list_head sdma_pause_mm_list;
	struct list_head sdma_resume_mm_list;
};

void sdma_channel_reset_sq_cq(struct hisi_sdma_channel *pchan);
void sdma_clear_pid_ref(struct hisi_sdma_device *psdma_dev);
void sdma_clear_ida_ref(struct hisi_sdma_channel *pchannel);
int sdma_create_dbg_node(struct dentry *sdma_dbgfs_dir);
void sdma_cdev_init(struct cdev *cdev);
void sdma_info_sync_cdev(struct hisi_sdma_core_device *p, u32 *share_chns, struct ida *fd_ida,
			 bool *safe_mode);
			 bool *safe_mode, struct mutex *mutex_lock);
void sdma_info_sync_dbg(struct hisi_sdma_core_device *p, u32 *share_chns);

static inline void chn_set_val(struct hisi_sdma_channel *pchan, int reg, u32 val, u32 mask)
@@ -172,6 +198,11 @@ static inline void sdma_channel_write_reset(struct hisi_sdma_channel *pchan)
	chn_set_val(pchan, HISI_SDMA_CH_TEST_REG, 1, HISI_SDMA_CH_RESET_MSK);
}

/* Write the RESUME bit in the channel TEST register to restart a paused channel. */
static inline void sdma_channel_write_resume(struct hisi_sdma_channel *pchan)
{
	chn_set_val(pchan, HISI_SDMA_CH_TEST_REG, 1, HISI_SDMA_CH_RESUME_MSK);
}

static inline void sdma_channel_enable(struct hisi_sdma_channel *pchan)
{
	chn_set_val(pchan, HISI_SDMA_CH_CTRL_REG, 1, HISI_SDMA_CH_ENABLE_MSK);
+9 −3
Original line number Diff line number Diff line
@@ -9,6 +9,7 @@
#include <linux/printk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mutex.h>

#include "sdma_hal.h"
#include "sdma_irq.h"
@@ -29,6 +30,7 @@ module_param(safe_mode, bool, RW_R_R);
MODULE_PARM_DESC(safe_mode, "| 0 - fast_mode| 1 - safe_mode(default)|");

struct ida fd_ida;
struct mutex g_mutex_lock;
struct hisi_sdma_core_device hisi_sdma_core_device = {0};
static struct class *sdma_class;
static struct dentry *sdma_dbgfs_dir;
@@ -88,7 +90,7 @@ static void sdma_channel_init(struct hisi_sdma_channel *pchan)
	sdma_channel_enable(pchan);
}

static void sdma_channel_reset_sq_cq(struct hisi_sdma_channel *pchan)
void sdma_channel_reset_sq_cq(struct hisi_sdma_channel *pchan)
{
	u32 cq_head, cq_tail;

@@ -97,7 +99,7 @@ static void sdma_channel_reset_sq_cq(struct hisi_sdma_channel *pchan)

	while (cq_head != cq_tail) {
		sdma_channel_set_cq_head(pchan, cq_tail);
		msleep(HISI_SDMA_FSM_INTERVAL);
		msleep(SDMA_POLL_DELAY);

		cq_head = sdma_channel_get_cq_head(pchan);
		cq_tail = sdma_channel_get_cq_tail(pchan);
@@ -524,7 +526,8 @@ static int __init sdma_driver_init(void)
	long ret;

	ida_init(&fd_ida);
	sdma_info_sync_cdev(&hisi_sdma_core_device, &share_chns, &fd_ida, &safe_mode);
	sdma_info_sync_cdev(&hisi_sdma_core_device, &share_chns, &fd_ida, &safe_mode,
			    &g_mutex_lock);
	sdma_info_sync_dbg(&hisi_sdma_core_device, &share_chns);

	sdma_class = class_create(THIS_MODULE, "sdma");
@@ -560,6 +563,8 @@ static int __init sdma_driver_init(void)
		goto umem_hash_free;
	}

	mutex_init(&g_mutex_lock);

	return 0;

umem_hash_free:
@@ -580,6 +585,7 @@ static int __init sdma_driver_init(void)

static void __exit sdma_driver_exit(void)
{
	mutex_destroy(&g_mutex_lock);
	sdma_authority_ht_free();
	sdma_hash_free();
	platform_driver_unregister(&sdma_driver);
+5 −0
Original line number Diff line number Diff line
@@ -23,12 +23,17 @@

#define HISI_SDMA_CH_DFX_REG			0x300

/* HISI_SDMA_CH_REGS Registers' Values */
#define HISI_SDMA_CHN_IS_QUIESCENT		0x10
#define HISI_SDMA_CHN_IS_PAUSED			0x08

#define HISI_SDMA_U32_MSK			GENMASK(31, 0)
/* REG_FIELD_MASK IN HISI_SDMA_CH_CTRL_REG */
#define HISI_SDMA_CH_ENABLE_MSK			GENMASK(0, 0)

/* REG_FIELD_MASK IN HISI_SDMA_CH_TEST_REG */
#define HISI_SDMA_CH_PAUSE_MSK			GENMASK(0, 0)
#define HISI_SDMA_CH_RESUME_MSK			GENMASK(1, 1)
#define HISI_SDMA_CH_RESET_MSK			GENMASK(2, 2)

/* HISI_SDMA_CH_FSM_STATUS VAL */