Commit 78baa1ea authored by Rafael J. Wysocki

Merge branches 'pm-cpuidle' and 'pm-sleep'

* pm-cpuidle:
  PM / Domains: Add genpd governor for CPUs
  cpuidle: Export the next timer expiration for CPUs
  PM / Domains: Add support for CPU devices to genpd
  PM / Domains: Add generic data pointer to struct genpd_power_state
  cpuidle: exynos: Unify target residency for AFTR and coupled AFTR states

* pm-sleep:
  PM / core: Propagate dev->power.wakeup_path when no callbacks
  PM / core: Introduce dpm_async_fn() helper
  PM / core: fix kerneldoc comment for device_pm_wait_for_dev()
  PM / core: fix kerneldoc comment for dpm_watchdog_handler()
  PM / sleep: Measure the time of filesystems syncing
  PM / sleep: Refactor filesystems sync to reduce duplication
  PM / wakeup: Use pm_pr_dbg() instead of pr_debug()
drivers/base/power/domain.c  +74 −3
@@ -22,6 +22,7 @@
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include "power.h"

@@ -128,6 +129,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)

static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
@@ -1454,6 +1456,56 @@ static void genpd_free_dev_data(struct device *dev,
	dev_pm_put_subsys_data(dev);
}

static void __genpd_update_cpumask(struct generic_pm_domain *genpd,
				   int cpu, bool set, unsigned int depth)
{
	struct gpd_link *link;

	if (!genpd_is_cpu_domain(genpd))
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		struct generic_pm_domain *master = link->master;

		genpd_lock_nested(master, depth + 1);
		__genpd_update_cpumask(master, cpu, set, depth + 1);
		genpd_unlock(master);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 struct device *dev, bool set)
{
	int cpu;

	if (!genpd_is_cpu_domain(genpd))
		return;

	for_each_possible_cpu(cpu) {
		if (get_cpu_device(cpu) == dev) {
			__genpd_update_cpumask(genpd, cpu, set, 0);
			return;
		}
	}
}

static void genpd_set_cpumask(struct generic_pm_domain *genpd,
			      struct device *dev)
{
	genpd_update_cpumask(genpd, dev, true);
}

static void genpd_clear_cpumask(struct generic_pm_domain *genpd,
				struct device *dev)
{
	genpd_update_cpumask(genpd, dev, false);
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct gpd_timing_data *td)
{
@@ -1475,6 +1527,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, dev);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
@@ -1532,6 +1585,7 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
	genpd->device_count--;
	genpd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, dev);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);
@@ -1686,6 +1740,12 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

static void genpd_free_default_power_state(struct genpd_power_state *states,
					   unsigned int state_count)
{
	kfree(states);
}

static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;
@@ -1696,7 +1756,7 @@ static int genpd_set_default_power_state(struct generic_pm_domain *genpd)

	genpd->states = state;
	genpd->state_count = 1;
-	genpd->free = state;
+	genpd->free_states = genpd_free_default_power_state;

	return 0;
}
@@ -1762,11 +1822,18 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
	if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
		return -EINVAL;

+	if (genpd_is_cpu_domain(genpd) &&
+	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
+		return -ENOMEM;
+
 	/* Use only one "off" state if there were no states declared */
 	if (genpd->state_count == 0) {
 		ret = genpd_set_default_power_state(genpd);
-		if (ret)
+		if (ret) {
+			if (genpd_is_cpu_domain(genpd))
+				free_cpumask_var(genpd->cpus);
 			return ret;
+		}
	} else if (!gov && genpd->state_count > 1) {
		pr_warn("%s: no governor for states\n", genpd->name);
	}
@@ -1812,7 +1879,11 @@ static int genpd_remove(struct generic_pm_domain *genpd)
	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	cancel_work_sync(&genpd->power_off_work);
-	kfree(genpd->free);
+	if (genpd_is_cpu_domain(genpd))
+		free_cpumask_var(genpd->cpus);
+	if (genpd->free_states)
+		genpd->free_states(genpd->states, genpd->state_count);

	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
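Taken together, the domain.c changes let a genpd provider opt in to CPU tracking by setting GENPD_FLAG_CPU_DOMAIN before pm_genpd_init(), after which genpd allocates genpd->cpus and keeps it updated as CPU devices are attached to the domain or its subdomains. A minimal sketch of such a provider, assuming a hypothetical cluster domain (cluster_pd and cluster_pd_setup() are illustrative names, not part of this series):

#include <linux/pm_domain.h>

/* Hypothetical cluster power domain; only the flags are of interest here. */
static struct generic_pm_domain cluster_pd = {
	.name	= "cluster0",
	/*
	 * CPU_DOMAIN enables the cpumask tracking added above; IRQ_SAFE is
	 * typical for domains that are power-managed from the idle path.
	 */
	.flags	= GENPD_FLAG_CPU_DOMAIN | GENPD_FLAG_IRQ_SAFE,
};

static int cluster_pd_setup(void)
{
	/*
	 * With GENPD_FLAG_CPU_DOMAIN set, pm_genpd_init() allocates
	 * genpd->cpus, and genpd_add_device()/genpd_remove_device() keep
	 * the mask in sync as CPU devices come and go. The new
	 * pm_domain_cpu_gov governor (see the governor diff below) is
	 * passed as the domain governor; false means "initially on".
	 */
	return pm_genpd_init(&cluster_pd, &pm_domain_cpu_gov, false);
}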
drivers/base/power/domain_governor.c  +66 −1
@@ -10,6 +10,9 @@
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/hrtimer.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/ktime.h>

static int dev_update_qos_constraint(struct device *dev, void *data)
{
@@ -210,8 +213,10 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
	struct generic_pm_domain *genpd = pd_to_genpd(pd);
	struct gpd_link *link;

-	if (!genpd->max_off_time_changed)
+	if (!genpd->max_off_time_changed) {
+		genpd->state_idx = genpd->cached_power_down_state_idx;
 		return genpd->cached_power_down_ok;
+	}

	/*
	 * We have to invalidate the cached results for the masters, so
@@ -236,6 +241,7 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
		genpd->state_idx--;
	}

	genpd->cached_power_down_state_idx = genpd->state_idx;
	return genpd->cached_power_down_ok;
}

@@ -244,6 +250,65 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain)
	return false;
}

#ifdef CONFIG_CPU_IDLE
static bool cpu_power_down_ok(struct dev_pm_domain *pd)
{
	struct generic_pm_domain *genpd = pd_to_genpd(pd);
	struct cpuidle_device *dev;
	ktime_t domain_wakeup, next_hrtimer;
	s64 idle_duration_ns;
	int cpu, i;

	/* Validate dev PM QoS constraints. */
	if (!default_power_down_ok(pd))
		return false;

	if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
		return true;

	/*
	 * Find the next wakeup for any of the online CPUs within the PM domain
	 * and its subdomains. Note, we only need the genpd->cpus, as it already
	 * contains a mask of all CPUs from subdomains.
	 */
	domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
	for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
		dev = per_cpu(cpuidle_devices, cpu);
		if (dev) {
			next_hrtimer = READ_ONCE(dev->next_hrtimer);
			if (ktime_before(next_hrtimer, domain_wakeup))
				domain_wakeup = next_hrtimer;
		}
	}

	/* The minimum idle duration is from now - until the next wakeup. */
	idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get()));
	if (idle_duration_ns <= 0)
		return false;

	/*
	 * Find the deepest idle state that has its residency value satisfied
	 * and by also taking into account the power off latency for the state.
	 * Start at the state picked by the dev PM QoS constraint validation.
	 */
	i = genpd->state_idx;
	do {
		if (idle_duration_ns >= (genpd->states[i].residency_ns +
		    genpd->states[i].power_off_latency_ns)) {
			genpd->state_idx = i;
			return true;
		}
	} while (--i >= 0);

	return false;
}

struct dev_power_governor pm_domain_cpu_gov = {
	.suspend_ok = default_suspend_ok,
	.power_down_ok = cpu_power_down_ok,
};
#endif

struct dev_power_governor simple_qos_governor = {
	.suspend_ok = default_suspend_ok,
	.power_down_ok = default_power_down_ok,
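With GENPD_FLAG_CPU_DOMAIN set, cpu_power_down_ok() first runs the dev PM QoS validation, then finds the earliest next_hrtimer among the domain's online CPUs and walks from the QoS-selected state index towards index 0, settling on the deepest state whose residency_ns plus power_off_latency_ns still fits in that idle window. A rough worked sketch with invented numbers (cluster_states and cluster_pd_set_states() are illustrative; a real provider would typically take these values from DT idle states, e.g. via of_genpd_parse_idle_states()):

/* Illustrative state table, shallowest state first; values are made up. */
static struct genpd_power_state cluster_states[] = {
	{ .residency_ns = 500000,  .power_off_latency_ns = 100000 },	/* [0] "retention" */
	{ .residency_ns = 2000000, .power_off_latency_ns = 500000 },	/* [1] "off" */
};

static void cluster_pd_set_states(struct generic_pm_domain *genpd)
{
	/*
	 * Example outcome: with an expected idle window of 1.5 ms and QoS
	 * allowing index 1, "off" is rejected (2.0 ms + 0.5 ms > 1.5 ms)
	 * and "retention" is accepted (0.5 ms + 0.1 ms <= 1.5 ms), so the
	 * governor sets genpd->state_idx to 0.
	 */
	genpd->states = cluster_states;
	genpd->state_count = ARRAY_SIZE(cluster_states);
}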
drivers/base/power/main.c  +29 −41
@@ -478,7 +478,7 @@ struct dpm_watchdog {

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
- * @data: Watchdog object address.
+ * @t: The timer that PM watchdog depends on.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
@@ -706,6 +706,19 @@ static bool is_async(struct device *dev)
		&& !pm_trace_is_enabled();
}

static bool dpm_async_fn(struct device *dev, async_func_t func)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(func, dev);
		return true;
	}

	return false;
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
@@ -732,13 +745,8 @@ void dpm_noirq_resume_devices(pm_message_t state)
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
-	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
-		reinit_completion(&dev->power.completion);
-		if (is_async(dev)) {
-			get_device(dev);
-			async_schedule_dev(async_resume_noirq, dev);
-		}
-	}
+	list_for_each_entry(dev, &dpm_noirq_list, power.entry)
+		dpm_async_fn(dev, async_resume_noirq);

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
@@ -889,13 +897,8 @@ void dpm_resume_early(pm_message_t state)
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
-	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
-		reinit_completion(&dev->power.completion);
-		if (is_async(dev)) {
-			get_device(dev);
-			async_schedule_dev(async_resume_early, dev);
-		}
-	}
+	list_for_each_entry(dev, &dpm_late_early_list, power.entry)
+		dpm_async_fn(dev, async_resume_early);

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
@@ -1053,13 +1056,8 @@ void dpm_resume(pm_message_t state)
	pm_transition = state;
	async_error = 0;

-	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
-		reinit_completion(&dev->power.completion);
-		if (is_async(dev)) {
-			get_device(dev);
-			async_schedule_dev(async_resume, dev);
-		}
-	}
+	list_for_each_entry(dev, &dpm_suspended_list, power.entry)
+		dpm_async_fn(dev, async_resume);

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
@@ -1373,13 +1371,9 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie)

static int device_suspend_noirq(struct device *dev)
{
-	reinit_completion(&dev->power.completion);
-
-	if (is_async(dev)) {
-		get_device(dev);
-		async_schedule_dev(async_suspend_noirq, dev);
+	if (dpm_async_fn(dev, async_suspend_noirq))
 		return 0;
-	}

	return __device_suspend_noirq(dev, pm_transition, false);
}

@@ -1576,13 +1570,8 @@ static void async_suspend_late(void *data, async_cookie_t cookie)

static int device_suspend_late(struct device *dev)
{
-	reinit_completion(&dev->power.completion);
-
-	if (is_async(dev)) {
-		get_device(dev);
-		async_schedule_dev(async_suspend_late, dev);
+	if (dpm_async_fn(dev, async_suspend_late))
 		return 0;
-	}

	return __device_suspend_late(dev, pm_transition, false);
}
@@ -1747,6 +1736,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || dev->power.wakeup_path)
		dev->power.direct_complete = false;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
@@ -1842,13 +1835,8 @@ static void async_suspend(void *data, async_cookie_t cookie)

static int device_suspend(struct device *dev)
{
-	reinit_completion(&dev->power.completion);
-
-	if (is_async(dev)) {
-		get_device(dev);
-		async_schedule_dev(async_suspend, dev);
+	if (dpm_async_fn(dev, async_suspend))
 		return 0;
-	}

	return __device_suspend(dev, pm_transition, false);
}
@@ -2069,8 +2057,8 @@ EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
- * @dev: Device to wait for.
  * @subordinate: Device that needs to wait for @dev.
+ * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
drivers/base/power/wakeup.c  +3 −3
@@ -804,7 +804,7 @@ void pm_print_active_wakeup_sources(void)
	srcuidx = srcu_read_lock(&wakeup_srcu);
	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
		if (ws->active) {
			pr_debug("active wakeup source: %s\n", ws->name);
			pm_pr_dbg("active wakeup source: %s\n", ws->name);
			active = 1;
		} else if (!active &&
			   (!last_activity_ws ||
@@ -815,7 +815,7 @@ void pm_print_active_wakeup_sources(void)
	}

	if (!active && last_activity_ws)
		pr_debug("last active wakeup source: %s\n",
		pm_pr_dbg("last active wakeup source: %s\n",
			last_activity_ws->name);
	srcu_read_unlock(&wakeup_srcu, srcuidx);
}
@@ -845,7 +845,7 @@ bool pm_wakeup_pending(void)
	raw_spin_unlock_irqrestore(&events_lock, flags);

	if (ret) {
		pr_debug("Wakeup pending, aborting suspend\n");
		pm_pr_dbg("Wakeup pending, aborting suspend\n");
		pm_print_active_wakeup_sources();
	}

drivers/cpuidle/cpuidle-exynos.c  +1 −1
@@ -84,7 +84,7 @@ static struct cpuidle_driver exynos_idle_driver = {
		[1] = {
			.enter			= exynos_enter_lowpower,
			.exit_latency		= 300,
-			.target_residency	= 100000,
+			.target_residency	= 10000,
			.name			= "C1",
			.desc			= "ARM power down",
		},