Commit 9cd20d3f authored by Can Guo, committed by Martin K. Petersen
Browse files

scsi: ufs: Protect PM ops and err_handler from user access through sysfs

User layer may access sysfs nodes when system PM ops or error handling is
running. This can cause various problems. Rename eh_sem to host_sem and use
it to protect PM ops and error handling from user layer intervention.

Link: https://lore.kernel.org/r/1610594010-7254-3-git-send-email-cang@codeaurora.org


Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>
Acked-by: Avri Altman <avri.altman@wdc.com>
Signed-off-by: Can Guo <cang@codeaurora.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent fb7afe24
Loading
Loading
Loading
Loading
+90 −16
Original line number Original line Diff line number Diff line
@@ -154,18 +154,29 @@ static ssize_t auto_hibern8_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
				 struct device_attribute *attr, char *buf)
{
{
	u32 ahit;
	u32 ahit;
	int ret;
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_hba *hba = dev_get_drvdata(dev);


	if (!ufshcd_is_auto_hibern8_supported(hba))
	if (!ufshcd_is_auto_hibern8_supported(hba))
		return -EOPNOTSUPP;
		return -EOPNOTSUPP;


	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(hba->dev);
	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);
	ufshcd_hold(hba, false);
	ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
	ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
	ufshcd_release(hba);
	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
	pm_runtime_put_sync(hba->dev);


	return sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
	ret = sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));

out:
	up(&hba->host_sem);
	return ret;
}
}


static ssize_t auto_hibern8_store(struct device *dev,
static ssize_t auto_hibern8_store(struct device *dev,
@@ -174,6 +185,7 @@ static ssize_t auto_hibern8_store(struct device *dev,
{
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned int timer;
	unsigned int timer;
	int ret = 0;


	if (!ufshcd_is_auto_hibern8_supported(hba))
	if (!ufshcd_is_auto_hibern8_supported(hba))
		return -EOPNOTSUPP;
		return -EOPNOTSUPP;
@@ -184,9 +196,17 @@ static ssize_t auto_hibern8_store(struct device *dev,
	if (timer > UFSHCI_AHIBERN8_MAX)
	if (timer > UFSHCI_AHIBERN8_MAX)
		return -EINVAL;
		return -EINVAL;


	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(timer));
	ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(timer));


	return count;
out:
	up(&hba->host_sem);
	return ret ? ret : count;
}
}


static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RW(rpm_lvl);
@@ -225,12 +245,21 @@ static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
	if (param_size > 8)
	if (param_size > 8)
		return -EINVAL;
		return -EINVAL;


	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(hba->dev);
	pm_runtime_get_sync(hba->dev);
	ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
	ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
				param_offset, desc_buf, param_size);
				param_offset, desc_buf, param_size);
	pm_runtime_put_sync(hba->dev);
	pm_runtime_put_sync(hba->dev);
	if (ret)
	if (ret) {
		return -EINVAL;
		ret = -EINVAL;
		goto out;
	}

	switch (param_size) {
	switch (param_size) {
	case 1:
	case 1:
		ret = sysfs_emit(sysfs_buf, "0x%02X\n", *desc_buf);
		ret = sysfs_emit(sysfs_buf, "0x%02X\n", *desc_buf);
@@ -249,6 +278,8 @@ static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
		break;
		break;
	}
	}


out:
	up(&hba->host_sem);
	return ret;
	return ret;
}
}


@@ -591,9 +622,16 @@ static ssize_t _name##_show(struct device *dev, \
	int desc_len = QUERY_DESC_MAX_SIZE;				\
	int desc_len = QUERY_DESC_MAX_SIZE;				\
	u8 *desc_buf;							\
	u8 *desc_buf;							\
									\
									\
	down(&hba->host_sem);						\
	if (!ufshcd_is_user_access_allowed(hba)) {			\
		up(&hba->host_sem);					\
		return -EBUSY;						\
	}								\
	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC);		\
	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC);		\
	if (!desc_buf)                                                  \
	if (!desc_buf) {						\
		up(&hba->host_sem);					\
		return -ENOMEM;						\
		return -ENOMEM;						\
	}								\
	pm_runtime_get_sync(hba->dev);					\
	pm_runtime_get_sync(hba->dev);					\
	ret = ufshcd_query_descriptor_retry(hba,			\
	ret = ufshcd_query_descriptor_retry(hba,			\
		UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE,	\
		UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE,	\
@@ -613,6 +651,7 @@ static ssize_t _name##_show(struct device *dev, \
out:									\
out:									\
	pm_runtime_put_sync(hba->dev);					\
	pm_runtime_put_sync(hba->dev);					\
	kfree(desc_buf);						\
	kfree(desc_buf);						\
	up(&hba->host_sem);						\
	return ret;							\
	return ret;							\
}									\
}									\
static DEVICE_ATTR_RO(_name)
static DEVICE_ATTR_RO(_name)
@@ -651,15 +690,26 @@ static ssize_t _name##_show(struct device *dev, \
	u8 index = 0;							\
	u8 index = 0;							\
	int ret;							\
	int ret;							\
	struct ufs_hba *hba = dev_get_drvdata(dev);			\
	struct ufs_hba *hba = dev_get_drvdata(dev);			\
									\
	down(&hba->host_sem);						\
	if (!ufshcd_is_user_access_allowed(hba)) {			\
		up(&hba->host_sem);					\
		return -EBUSY;						\
	}								\
	if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname))			\
	if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname))			\
		index = ufshcd_wb_get_query_index(hba);			\
		index = ufshcd_wb_get_query_index(hba);			\
	pm_runtime_get_sync(hba->dev);					\
	pm_runtime_get_sync(hba->dev);					\
	ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,	\
	ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,	\
		QUERY_FLAG_IDN##_uname, index, &flag);			\
		QUERY_FLAG_IDN##_uname, index, &flag);			\
	pm_runtime_put_sync(hba->dev);					\
	pm_runtime_put_sync(hba->dev);					\
	if (ret)							\
	if (ret) {							\
		return -EINVAL;						\
		ret = -EINVAL;						\
	return sysfs_emit(buf, "%s\n", flag ? "true" : "false");	\
		goto out;						\
	}								\
	ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false");		\
out:									\
	up(&hba->host_sem);						\
	return ret;							\
}									\
}									\
static DEVICE_ATTR_RO(_name)
static DEVICE_ATTR_RO(_name)


@@ -709,15 +759,26 @@ static ssize_t _name##_show(struct device *dev, \
	u32 value;							\
	u32 value;							\
	int ret;							\
	int ret;							\
	u8 index = 0;							\
	u8 index = 0;							\
									\
	down(&hba->host_sem);						\
	if (!ufshcd_is_user_access_allowed(hba)) {			\
		up(&hba->host_sem);					\
		return -EBUSY;						\
	}								\
	if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname))			\
	if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname))			\
		index = ufshcd_wb_get_query_index(hba);			\
		index = ufshcd_wb_get_query_index(hba);			\
	pm_runtime_get_sync(hba->dev);					\
	pm_runtime_get_sync(hba->dev);					\
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,	\
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,	\
		QUERY_ATTR_IDN##_uname, index, 0, &value);		\
		QUERY_ATTR_IDN##_uname, index, 0, &value);		\
	pm_runtime_put_sync(hba->dev);					\
	pm_runtime_put_sync(hba->dev);					\
	if (ret)							\
	if (ret) {							\
		return -EINVAL;						\
		ret = -EINVAL;						\
	return sysfs_emit(buf, "0x%08X\n", value);			\
		goto out;						\
	}								\
	ret = sysfs_emit(buf, "0x%08X\n", value);			\
out:									\
	up(&hba->host_sem);						\
	return ret;							\
}									\
}									\
static DEVICE_ATTR_RO(_name)
static DEVICE_ATTR_RO(_name)


@@ -851,13 +912,26 @@ static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
	int ret;
	int ret;


	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		ret = -EBUSY;
		goto out;
	}

	pm_runtime_get_sync(hba->dev);
	pm_runtime_get_sync(hba->dev);
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
		QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
		QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
	pm_runtime_put_sync(hba->dev);
	pm_runtime_put_sync(hba->dev);
	if (ret)
	if (ret) {
		return -EINVAL;
		ret = -EINVAL;
	return sysfs_emit(buf, "0x%08X\n", value);
		goto out;
	}

	ret = sysfs_emit(buf, "0x%08X\n", value);

out:
	up(&hba->host_sem);
	return ret;
}
}
static DEVICE_ATTR_RO(dyn_cap_needed_attribute);
static DEVICE_ATTR_RO(dyn_cap_needed_attribute);


+26 −16
Original line number Original line Diff line number Diff line
@@ -1546,11 +1546,17 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
{
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	u32 value;
	int err;
	int err = 0;


	if (kstrtou32(buf, 0, &value))
	if (kstrtou32(buf, 0, &value))
		return -EINVAL;
		return -EINVAL;


	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		err = -EBUSY;
		goto out;
	}

	value = !!value;
	value = !!value;
	if (value == hba->clk_scaling.is_allowed)
	if (value == hba->clk_scaling.is_allowed)
		goto out;
		goto out;
@@ -1576,7 +1582,8 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
	ufshcd_release(hba);
	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
	pm_runtime_put_sync(hba->dev);
out:
out:
	return count;
	up(&hba->host_sem);
	return err ? err : count;
}
}


static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
@@ -5775,7 +5782,8 @@ static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)


static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
{
{
	return (!hba->is_powered || hba->ufshcd_state == UFSHCD_STATE_ERROR ||
	return (!hba->is_powered || hba->shutting_down ||
		hba->ufshcd_state == UFSHCD_STATE_ERROR ||
		(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
		(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
		   ufshcd_is_link_broken(hba))));
		   ufshcd_is_link_broken(hba))));
}
}
@@ -5847,13 +5855,13 @@ static void ufshcd_err_handler(struct work_struct *work)


	hba = container_of(work, struct ufs_hba, eh_work);
	hba = container_of(work, struct ufs_hba, eh_work);


	down(&hba->eh_sem);
	down(&hba->host_sem);
	spin_lock_irqsave(hba->host->host_lock, flags);
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ufshcd_err_handling_should_stop(hba)) {
	if (ufshcd_err_handling_should_stop(hba)) {
		if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
		if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		up(&hba->eh_sem);
		up(&hba->host_sem);
		return;
		return;
	}
	}
	ufshcd_set_eh_in_progress(hba);
	ufshcd_set_eh_in_progress(hba);
@@ -6022,7 +6030,7 @@ static void ufshcd_err_handler(struct work_struct *work)
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_scsi_unblock_requests(hba);
	ufshcd_scsi_unblock_requests(hba);
	ufshcd_err_handling_unprepare(hba);
	ufshcd_err_handling_unprepare(hba);
	up(&hba->eh_sem);
	up(&hba->host_sem);
}
}


/**
/**
@@ -7924,10 +7932,10 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
	struct ufs_hba *hba = (struct ufs_hba *)data;
	struct ufs_hba *hba = (struct ufs_hba *)data;
	int ret;
	int ret;


	down(&hba->eh_sem);
	down(&hba->host_sem);
	/* Initialize hba, detect and initialize UFS device */
	/* Initialize hba, detect and initialize UFS device */
	ret = ufshcd_probe_hba(hba, true);
	ret = ufshcd_probe_hba(hba, true);
	up(&hba->eh_sem);
	up(&hba->host_sem);
	if (ret)
	if (ret)
		goto out;
		goto out;


@@ -8959,7 +8967,7 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
		return 0;
		return 0;
	}
	}


	down(&hba->eh_sem);
	down(&hba->host_sem);


	if (!hba->is_powered)
	if (!hba->is_powered)
		return 0;
		return 0;
@@ -8992,7 +9000,7 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
	if (!ret)
	if (!ret)
		hba->is_sys_suspended = true;
		hba->is_sys_suspended = true;
	else
	else
		up(&hba->eh_sem);
		up(&hba->host_sem);
	return ret;
	return ret;
}
}
EXPORT_SYMBOL(ufshcd_system_suspend);
EXPORT_SYMBOL(ufshcd_system_suspend);
@@ -9014,7 +9022,7 @@ int ufshcd_system_resume(struct ufs_hba *hba)


	if (unlikely(early_suspend)) {
	if (unlikely(early_suspend)) {
		early_suspend = false;
		early_suspend = false;
		down(&hba->eh_sem);
		down(&hba->host_sem);
	}
	}


	if (!hba->is_powered || pm_runtime_suspended(hba->dev))
	if (!hba->is_powered || pm_runtime_suspended(hba->dev))
@@ -9031,7 +9039,7 @@ int ufshcd_system_resume(struct ufs_hba *hba)
		hba->curr_dev_pwr_mode, hba->uic_link_state);
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	if (!ret)
	if (!ret)
		hba->is_sys_suspended = false;
		hba->is_sys_suspended = false;
	up(&hba->eh_sem);
	up(&hba->host_sem);
	return ret;
	return ret;
}
}
EXPORT_SYMBOL(ufshcd_system_resume);
EXPORT_SYMBOL(ufshcd_system_resume);
@@ -9123,7 +9131,10 @@ int ufshcd_shutdown(struct ufs_hba *hba)
{
{
	int ret = 0;
	int ret = 0;


	down(&hba->eh_sem);
	down(&hba->host_sem);
	hba->shutting_down = true;
	up(&hba->host_sem);

	if (!hba->is_powered)
	if (!hba->is_powered)
		goto out;
		goto out;


@@ -9137,7 +9148,6 @@ int ufshcd_shutdown(struct ufs_hba *hba)
	if (ret)
	if (ret)
		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
	hba->is_powered = false;
	hba->is_powered = false;
	up(&hba->eh_sem);
	/* allow force shutdown even in case of errors */
	/* allow force shutdown even in case of errors */
	return 0;
	return 0;
}
}
@@ -9333,7 +9343,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);


	sema_init(&hba->eh_sem, 1);
	sema_init(&hba->host_sem, 1);


	/* Initialize UIC command mutex */
	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);
	mutex_init(&hba->uic_cmd_mutex);
+9 −1
Original line number Original line Diff line number Diff line
@@ -665,6 +665,8 @@ struct ufs_hba_variant_params {
 * @intr_mask: Interrupt Mask Bits
 * @intr_mask: Interrupt Mask Bits
 * @ee_ctrl_mask: Exception event control mask
 * @ee_ctrl_mask: Exception event control mask
 * @is_powered: flag to check if HBA is powered
 * @is_powered: flag to check if HBA is powered
 * @shutting_down: flag to check if shutdown has been invoked
 * @host_sem: semaphore used to serialize concurrent contexts
 * @eh_wq: Workqueue that eh_work works on
 * @eh_wq: Workqueue that eh_work works on
 * @eh_work: Worker to handle UFS errors that require s/w attention
 * @eh_work: Worker to handle UFS errors that require s/w attention
 * @eeh_work: Worker to handle exception events
 * @eeh_work: Worker to handle exception events
@@ -761,7 +763,8 @@ struct ufs_hba {
	u32 intr_mask;
	u32 intr_mask;
	u16 ee_ctrl_mask;
	u16 ee_ctrl_mask;
	bool is_powered;
	bool is_powered;
	struct semaphore eh_sem;
	bool shutting_down;
	struct semaphore host_sem;


	/* Work Queues */
	/* Work Queues */
	struct workqueue_struct *eh_wq;
	struct workqueue_struct *eh_wq;
@@ -888,6 +891,11 @@ static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
	return hba->caps & UFSHCD_CAP_WB_EN;
	return hba->caps & UFSHCD_CAP_WB_EN;
}
}


static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
{
	return !hba->shutting_down;
}

#define ufshcd_writel(hba, val, reg)	\
#define ufshcd_writel(hba, val, reg)	\
	writel((val), (hba)->mmio_base + (reg))
	writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg)	\
#define ufshcd_readl(hba, reg)	\