Unverified Commit 069e6762 authored by openeuler-ci-bot, committed by Gitee

!6515 fix CVE-2023-52498

Merge Pull Request from: @ci-robot 
 
PR sync from: Yang Yingliang <yangyingliang@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/APMA4LEPLGSPPTWEHQA7JXOTAFWTLMQO/ 
Fix CVE-2023-52498.

Bjorn Helgaas (1):
  PM: sleep: Use dev_printk() when possible

Li zeming (1):
  PM: core: Remove unnecessary (void *) conversions

Rafael J. Wysocki (5):
  async: Split async_schedule_node_domain()
  async: Introduce async_schedule_dev_nocall()
  PM: sleep: Avoid calling put_device() under dpm_list_mtx
  PM: sleep: Fix possible deadlocks in core system-wide PM code
  PM: sleep: Fix error handling in dpm_prepare()


-- 
2.25.1
 
https://gitee.com/src-openeuler/kernel/issues/I97NIA 
 
Link: https://gitee.com/openeuler/kernel/pulls/6515

 

Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
parents 8c2a4928 22fdba55
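
Background for reviewers: the deadlock behind CVE-2023-52498 arises because async_schedule_dev() runs its callback synchronously in the caller's context when it cannot queue it (out of memory, or too much queued work), and the PM core used to kick off the async resume handlers while holding dpm_list_mtx, a lock the synchronously-run handler may then try to take again. What follows is a minimal userspace model of that failure pattern, not kernel code; schedule_or_run() and resume_cb() are illustrative names, and the program intentionally hangs on the non-recursive mutex when run.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void resume_cb(void *data)
{
	/* Like the PM resume handlers, the callback needs the list lock. */
	pthread_mutex_lock(&list_lock);
	printf("resumed %s\n", (const char *)data);
	pthread_mutex_unlock(&list_lock);
}

/* Old contract: if the work cannot be queued, run it synchronously. */
static void schedule_or_run(void (*fn)(void *), void *data, bool no_memory)
{
	if (no_memory)
		fn(data);	/* runs in the caller's context */
	/* else: hand fn off to a worker thread (omitted here) */
}

int main(void)
{
	pthread_mutex_lock(&list_lock);
	/* The caller still holds list_lock, so the synchronous fallback
	 * self-deadlocks, which models the CVE-2023-52498 hang. */
	schedule_or_run(resume_cb, "dev0", /* no_memory = */ true);
	pthread_mutex_unlock(&list_lock);
	return 0;
}

The fix has two parts: a new async_schedule_dev_nocall() that never falls back to synchronous execution, and wrappers (device_resume*() in the diff below) that perform the synchronous fallback in the caller after dpm_list_mtx has been dropped.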
drivers/base/power/main.c +135 −116
@@ -16,6 +16,7 @@
 */

#define pr_fmt(fmt) "PM: " fmt
+#define dev_fmt pr_fmt

#include <linux/device.h>
#include <linux/export.h>
@@ -449,8 +450,8 @@ static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
			int error)
{
	pr_err("Device %s failed to %s%s: error %d\n",
	       dev_name(dev), pm_verb(state.event), info, error);
	dev_err(dev, "failed to %s%s: error %d\n", pm_verb(state.event), info,
		error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
@@ -582,7 +583,7 @@ bool dev_pm_skip_resume(struct device *dev)
}

/**
- * device_resume_noirq - Execute a "noirq resume" callback for given device.
+ * __device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
@@ -590,7 +591,7 @@ bool dev_pm_skip_resume(struct device *dev)
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
-static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
+static void __device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
@@ -658,7 +659,13 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
-	return error;
+
+	if (error) {
+		suspend_stats.failed_resume_noirq++;
+		dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+		dpm_save_failed_dev(dev_name(dev));
+		pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
+	}
}

static bool is_async(struct device *dev)
@@ -671,27 +678,35 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	reinit_completion(&dev->power.completion);

-	if (is_async(dev)) {
-		get_device(dev);
-		async_schedule_dev(func, dev);
-		return true;
-	}
+	if (!is_async(dev))
+		return false;
+
+	get_device(dev);
+
+	if (async_schedule_dev_nocall(func, dev))
+		return true;
+
+	put_device(dev);

	return false;
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
-	struct device *dev = (struct device *)data;
-	int error;
-
-	error = device_resume_noirq(dev, pm_transition, true);
-	if (error)
-		pm_dev_err(dev, pm_transition, " async", error);
+	struct device *dev = data;
+
+	__device_resume_noirq(dev, pm_transition, true);
	put_device(dev);
}

+static void device_resume_noirq(struct device *dev)
+{
+	if (dpm_async_fn(dev, async_resume_noirq))
+		return;
+
+	__device_resume_noirq(dev, pm_transition, false);
+}

static void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
@@ -701,34 +716,18 @@ static void dpm_noirq_resume_devices(pm_message_t state)
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

-	/*
-	 * Advanced the async threads upfront,
-	 * in case the starting of async threads is
-	 * delayed by non-async resuming devices.
-	 */
-	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
-		dpm_async_fn(dev, async_resume_noirq);

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);

		mutex_unlock(&dpm_list_mtx);

-		if (!is_async(dev)) {
-			int error;
-
-			error = device_resume_noirq(dev, state, false);
-			if (error) {
-				suspend_stats.failed_resume_noirq++;
-				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
-				dpm_save_failed_dev(dev_name(dev));
-				pm_dev_err(dev, state, " noirq", error);
-			}
-		}
+		device_resume_noirq(dev);
+
+		put_device(dev);

		mutex_lock(&dpm_list_mtx);
-		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
@@ -754,14 +753,14 @@ void dpm_resume_noirq(pm_message_t state)
}

/**
- * device_resume_early - Execute an "early resume" callback for given device.
+ * __device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
-static int device_resume_early(struct device *dev, pm_message_t state, bool async)
+static void __device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
@@ -814,21 +813,31 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
-	return error;
+
+	if (error) {
+		suspend_stats.failed_resume_early++;
+		dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+		dpm_save_failed_dev(dev_name(dev));
+		pm_dev_err(dev, state, async ? " async early" : " early", error);
+	}
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
-	struct device *dev = (struct device *)data;
-	int error;
-
-	error = device_resume_early(dev, pm_transition, true);
-	if (error)
-		pm_dev_err(dev, pm_transition, " async", error);
+	struct device *dev = data;
+
+	__device_resume_early(dev, pm_transition, true);
	put_device(dev);
}

+static void device_resume_early(struct device *dev)
+{
+	if (dpm_async_fn(dev, async_resume_early))
+		return;
+
+	__device_resume_early(dev, pm_transition, false);
+}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
@@ -842,33 +851,18 @@ void dpm_resume_early(pm_message_t state)
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

-	/*
-	 * Advanced the async threads upfront,
-	 * in case the starting of async threads is
-	 * delayed by non-async resuming devices.
-	 */
-	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
-		dpm_async_fn(dev, async_resume_early);

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);

		mutex_unlock(&dpm_list_mtx);

-		if (!is_async(dev)) {
-			int error;
-
-			error = device_resume_early(dev, state, false);
-			if (error) {
-				suspend_stats.failed_resume_early++;
-				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
-				dpm_save_failed_dev(dev_name(dev));
-				pm_dev_err(dev, state, " early", error);
-			}
-		}
-		mutex_lock(&dpm_list_mtx);
+		device_resume_early(dev);
+
		put_device(dev);
+
+		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
@@ -888,12 +882,12 @@ void dpm_resume_start(pm_message_t state)
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
- * device_resume - Execute "resume" callbacks for given device.
+ * __device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
-static int device_resume(struct device *dev, pm_message_t state, bool async)
+static void __device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
@@ -975,20 +969,30 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)

	TRACE_RESUME(error);

-	return error;
+	if (error) {
+		suspend_stats.failed_resume++;
+		dpm_save_failed_step(SUSPEND_RESUME);
+		dpm_save_failed_dev(dev_name(dev));
+		pm_dev_err(dev, state, async ? " async" : "", error);
+	}
}

static void async_resume(void *data, async_cookie_t cookie)
{
-	struct device *dev = (struct device *)data;
-	int error;
-
-	error = device_resume(dev, pm_transition, true);
-	if (error)
-		pm_dev_err(dev, pm_transition, " async", error);
+	struct device *dev = data;
+
+	__device_resume(dev, pm_transition, true);
	put_device(dev);
}

+static void device_resume(struct device *dev)
+{
+	if (dpm_async_fn(dev, async_resume))
+		return;
+
+	__device_resume(dev, pm_transition, false);
+}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
@@ -1008,30 +1012,25 @@ void dpm_resume(pm_message_t state)
	pm_transition = state;
	async_error = 0;

-	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
-		dpm_async_fn(dev, async_resume);

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);

		get_device(dev);
-		if (!is_async(dev)) {
-			int error;
-
-			mutex_unlock(&dpm_list_mtx);
-
-			error = device_resume(dev, state, false);
-			if (error) {
-				suspend_stats.failed_resume++;
-				dpm_save_failed_step(SUSPEND_RESUME);
-				dpm_save_failed_dev(dev_name(dev));
-				pm_dev_err(dev, state, "", error);
-			}
-
-			mutex_lock(&dpm_list_mtx);
-		}
+
+		mutex_unlock(&dpm_list_mtx);
+
+		device_resume(dev);
+
+		mutex_lock(&dpm_list_mtx);

		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);

+		mutex_unlock(&dpm_list_mtx);
+
		put_device(dev);
+
+		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
@@ -1109,14 +1108,16 @@ void dpm_complete(pm_message_t state)
		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);

		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

-		mutex_lock(&dpm_list_mtx);
		put_device(dev);
+
+		mutex_lock(&dpm_list_mtx);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
@@ -1262,7 +1263,7 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
-	struct device *dev = (struct device *)data;
+	struct device *dev = data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
@@ -1301,17 +1302,21 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);

		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
-			put_device(dev);
-			break;
-		}
-		if (!list_empty(&dev->power.entry))
+		} else if (!list_empty(&dev->power.entry)) {
			list_move(&dev->power.entry, &dpm_noirq_list);
+		}
+
+		mutex_unlock(&dpm_list_mtx);
+
		put_device(dev);

-		if (async_error)
+		mutex_lock(&dpm_list_mtx);
+
+		if (error || async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
@@ -1441,7 +1446,7 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as

static void async_suspend_late(void *data, async_cookie_t cookie)
{
-	struct device *dev = (struct device *)data;
+	struct device *dev = data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
@@ -1478,23 +1483,28 @@ int dpm_suspend_late(pm_message_t state)
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);

		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
-			put_device(dev);
-			break;
		}

+		mutex_unlock(&dpm_list_mtx);
+
		put_device(dev);

-		if (async_error)
+		mutex_lock(&dpm_list_mtx);
+
+		if (error || async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
@@ -1712,7 +1722,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)

static void async_suspend(void *data, async_cookie_t cookie)
{
-	struct device *dev = (struct device *)data;
+	struct device *dev = data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
@@ -1754,21 +1764,27 @@ int dpm_suspend(pm_message_t state)
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);

		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
-			put_device(dev);
-			break;
-		}
-		if (!list_empty(&dev->power.entry))
+		} else if (!list_empty(&dev->power.entry)) {
			list_move(&dev->power.entry, &dpm_suspended_list);
+		}
+
+		mutex_unlock(&dpm_list_mtx);
+
		put_device(dev);
-		if (async_error)
+
+		mutex_lock(&dpm_list_mtx);
+
+		if (error || async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
@@ -1881,10 +1897,11 @@ int dpm_prepare(pm_message_t state)
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
-	while (!list_empty(&dpm_list)) {
+	while (!list_empty(&dpm_list) && !error) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);

		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
@@ -1892,21 +1909,23 @@ int dpm_prepare(pm_message_t state)
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
-		if (error) {
-			if (error == -EAGAIN) {
-				put_device(dev);
-				error = 0;
-				continue;
-			}
-			pr_info("Device %s not prepared for power transition: code %d\n",
-				dev_name(dev), error);
-			put_device(dev);
-			break;
-		}
-		dev->power.is_prepared = true;
-		if (!list_empty(&dev->power.entry))
-			list_move_tail(&dev->power.entry, &dpm_prepared_list);
-		put_device(dev);
+
+		if (!error) {
+			dev->power.is_prepared = true;
+			if (!list_empty(&dev->power.entry))
+				list_move_tail(&dev->power.entry, &dpm_prepared_list);
+		} else if (error == -EAGAIN) {
+			error = 0;
+		} else {
+			dev_info(dev, "not prepared for power transition: code %d\n",
+				 error);
+		}
+
+		mutex_unlock(&dpm_list_mtx);
+
+		put_device(dev);
+
+		mutex_lock(&dpm_list_mtx);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
include/linux/async.h +2 −0
@@ -90,6 +90,8 @@ async_schedule_dev(async_func_t func, struct device *dev)
	return async_schedule_node(func, dev, dev_to_node(dev));
}

+bool async_schedule_dev_nocall(async_func_t func, struct device *dev);
+
/**
 * async_schedule_dev_domain - A device specific version of async_schedule_domain
 * @func: function to execute asynchronously
kernel/async.c +63 −22
@@ -145,6 +145,39 @@ static void async_run_entry_fn(struct work_struct *work)
	wake_up(&async_done);
}

+static async_cookie_t __async_schedule_node_domain(async_func_t func,
+						   void *data, int node,
+						   struct async_domain *domain,
+						   struct async_entry *entry)
+{
+	async_cookie_t newcookie;
+	unsigned long flags;
+
+	INIT_LIST_HEAD(&entry->domain_list);
+	INIT_LIST_HEAD(&entry->global_list);
+	INIT_WORK(&entry->work, async_run_entry_fn);
+	entry->func = func;
+	entry->data = data;
+	entry->domain = domain;
+
+	spin_lock_irqsave(&async_lock, flags);
+
+	/* allocate cookie and queue */
+	newcookie = entry->cookie = next_cookie++;
+
+	list_add_tail(&entry->domain_list, &domain->pending);
+	if (domain->registered)
+		list_add_tail(&entry->global_list, &async_global_pending);
+
+	atomic_inc(&entry_count);
+	spin_unlock_irqrestore(&async_lock, flags);
+
+	/* schedule for execution */
+	queue_work_node(node, system_unbound_wq, &entry->work);
+
+	return newcookie;
+}

/**
 * async_schedule_node_domain - NUMA specific version of async_schedule_domain
 * @func: function to execute asynchronously
@@ -186,29 +219,8 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
		func(data, newcookie);
		return newcookie;
	}
-	INIT_LIST_HEAD(&entry->domain_list);
-	INIT_LIST_HEAD(&entry->global_list);
-	INIT_WORK(&entry->work, async_run_entry_fn);
-	entry->func = func;
-	entry->data = data;
-	entry->domain = domain;
-
-	spin_lock_irqsave(&async_lock, flags);
-
-	/* allocate cookie and queue */
-	newcookie = entry->cookie = next_cookie++;
-
-	list_add_tail(&entry->domain_list, &domain->pending);
-	if (domain->registered)
-		list_add_tail(&entry->global_list, &async_global_pending);
-
-	atomic_inc(&entry_count);
-	spin_unlock_irqrestore(&async_lock, flags);
-
-	/* schedule for execution */
-	queue_work_node(node, system_unbound_wq, &entry->work);
-
-	return newcookie;
+
+	return __async_schedule_node_domain(func, data, node, domain, entry);
}
EXPORT_SYMBOL_GPL(async_schedule_node_domain);

@@ -231,6 +243,35 @@ async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
}
EXPORT_SYMBOL_GPL(async_schedule_node);

+/**
+ * async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
+ * @func: function to execute asynchronously
+ * @dev: device argument to be passed to function
+ *
+ * @dev is used as both the argument for the function and to provide NUMA
+ * context for where to run the function.
+ *
+ * If the asynchronous execution of @func is scheduled successfully, return
+ * true. Otherwise, do nothing and return false, unlike async_schedule_dev()
+ * that will run the function synchronously then.
+ */
+bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
+{
+	struct async_entry *entry;
+
+	entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);
+
+	/* Give up if there is no memory or too much work. */
+	if (!entry || atomic_read(&entry_count) > MAX_WORK) {
+		kfree(entry);
+		return false;
+	}
+
+	__async_schedule_node_domain(func, dev, dev_to_node(dev),
+				     &async_dfl_domain, entry);
+	return true;
+}

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
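
For contrast with async_schedule_dev(), which executes func synchronously when it cannot queue the work, the _nocall variant above only queues or reports failure; the synchronous fallback moves to the caller, which can drop its locks first. Below is a minimal userspace model of that contract, not kernel code; schedule_nocall() is an illustrative name, not the kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void resume_cb(void *data)
{
	pthread_mutex_lock(&list_lock);
	printf("resumed %s\n", (const char *)data);
	pthread_mutex_unlock(&list_lock);
}

/* New contract: queue the work or fail; never run fn() here. */
static bool schedule_nocall(void (*fn)(void *), void *data, bool no_memory)
{
	(void)fn;
	(void)data;		/* queueing on a worker is omitted here */
	if (no_memory)
		return false;	/* the caller decides what to do */
	return true;
}

int main(void)
{
	const char *dev = "dev0";
	bool queued;

	pthread_mutex_lock(&list_lock);
	queued = schedule_nocall(resume_cb, (void *)dev, true);
	pthread_mutex_unlock(&list_lock);

	if (!queued)
		resume_cb((void *)dev);	/* sync fallback, lock released */

	return 0;
}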