Commit 40aa4db6 authored by Rafael J. Wysocki's avatar Rafael J. Wysocki
Browse files

Merge cpuidle material that the subsequent changes depend on.

parents 3ccf3f0c e9499968
Loading
Loading
Loading
Loading
+74 −3
Original line number Diff line number Diff line
@@ -22,6 +22,7 @@
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/cpu.h>

#include "power.h"

@@ -128,6 +129,7 @@ static const struct genpd_lock_ops genpd_spin_ops = {
#define genpd_is_irq_safe(genpd)	(genpd->flags & GENPD_FLAG_IRQ_SAFE)
#define genpd_is_always_on(genpd)	(genpd->flags & GENPD_FLAG_ALWAYS_ON)
#define genpd_is_active_wakeup(genpd)	(genpd->flags & GENPD_FLAG_ACTIVE_WAKEUP)
#define genpd_is_cpu_domain(genpd)	(genpd->flags & GENPD_FLAG_CPU_DOMAIN)

static inline bool irq_safe_dev_in_no_sleep_domain(struct device *dev,
		const struct generic_pm_domain *genpd)
@@ -1452,6 +1454,56 @@ static void genpd_free_dev_data(struct device *dev,
	dev_pm_put_subsys_data(dev);
}

/*
 * Recursively propagate a CPU-mask change for @cpu through @genpd and
 * every master domain above it. Masters are updated first, each under
 * its own lock taken one nesting level deeper than the caller's.
 * No-op for domains without GENPD_FLAG_CPU_DOMAIN set.
 */
static void __genpd_update_cpumask(struct generic_pm_domain *genpd,
				   int cpu, bool set, unsigned int depth)
{
	struct gpd_link *lnk;

	if (!genpd_is_cpu_domain(genpd))
		return;

	/* Walk all masters before touching this domain's own mask. */
	list_for_each_entry(lnk, &genpd->slave_links, slave_node) {
		struct generic_pm_domain *gpd = lnk->master;

		genpd_lock_nested(gpd, depth + 1);
		__genpd_update_cpumask(gpd, cpu, set, depth + 1);
		genpd_unlock(gpd);
	}

	if (set)
		cpumask_set_cpu(cpu, genpd->cpus);
	else
		cpumask_clear_cpu(cpu, genpd->cpus);
}

/*
 * Resolve @dev back to its CPU number and, if found, apply the cpumask
 * update to @genpd and its masters. Does nothing unless @genpd is a
 * CPU PM domain, or when @dev does not back any possible CPU.
 */
static void genpd_update_cpumask(struct generic_pm_domain *genpd,
				 struct device *dev, bool set)
{
	int i;

	if (!genpd_is_cpu_domain(genpd))
		return;

	for_each_possible_cpu(i) {
		if (get_cpu_device(i) != dev)
			continue;

		__genpd_update_cpumask(genpd, i, set, 0);
		break;
	}
}

/* Add the CPU backed by @dev to the cpumask of @genpd (and its masters). */
static void genpd_set_cpumask(struct generic_pm_domain *genpd,
			      struct device *dev)
{
	genpd_update_cpumask(genpd, dev, true);
}

/* Remove the CPU backed by @dev from the cpumask of @genpd (and its masters). */
static void genpd_clear_cpumask(struct generic_pm_domain *genpd,
				struct device *dev)
{
	genpd_update_cpumask(genpd, dev, false);
}

static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			    struct gpd_timing_data *td)
{
@@ -1473,6 +1525,7 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,

	genpd_lock(genpd);

	genpd_set_cpumask(genpd, dev);
	dev_pm_domain_set(dev, &genpd->domain);

	genpd->device_count++;
@@ -1530,6 +1583,7 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
	genpd->device_count--;
	genpd->max_off_time_changed = true;

	genpd_clear_cpumask(genpd, dev);
	dev_pm_domain_set(dev, NULL);

	list_del_init(&pdd->list_node);
@@ -1684,6 +1738,12 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

/*
 * ->free_states() callback for the default power-state table installed by
 * genpd_set_default_power_state(). That table is a single allocation, so
 * it is released with one kfree(); @state_count is unused here.
 */
static void genpd_free_default_power_state(struct genpd_power_state *states,
					   unsigned int state_count)
{
	kfree(states);
}

static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
{
	struct genpd_power_state *state;
@@ -1694,7 +1754,7 @@ static int genpd_set_default_power_state(struct generic_pm_domain *genpd)

	genpd->states = state;
	genpd->state_count = 1;
	genpd->free = state;
	genpd->free_states = genpd_free_default_power_state;

	return 0;
}
@@ -1760,11 +1820,18 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
	if (genpd_is_always_on(genpd) && !genpd_status_on(genpd))
		return -EINVAL;

	if (genpd_is_cpu_domain(genpd) &&
	    !zalloc_cpumask_var(&genpd->cpus, GFP_KERNEL))
		return -ENOMEM;

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0) {
		ret = genpd_set_default_power_state(genpd);
		if (ret)
		if (ret) {
			if (genpd_is_cpu_domain(genpd))
				free_cpumask_var(genpd->cpus);
			return ret;
		}
	} else if (!gov && genpd->state_count > 1) {
		pr_warn("%s: no governor for states\n", genpd->name);
	}
@@ -1810,7 +1877,11 @@ static int genpd_remove(struct generic_pm_domain *genpd)
	list_del(&genpd->gpd_list_node);
	genpd_unlock(genpd);
	cancel_work_sync(&genpd->power_off_work);
	kfree(genpd->free);
	if (genpd_is_cpu_domain(genpd))
		free_cpumask_var(genpd->cpus);
	if (genpd->free_states)
		genpd->free_states(genpd->states, genpd->state_count);

	pr_debug("%s: removed %s\n", __func__, genpd->name);

	return 0;
+66 −1
Original line number Diff line number Diff line
@@ -10,6 +10,9 @@
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/hrtimer.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/ktime.h>

static int dev_update_qos_constraint(struct device *dev, void *data)
{
@@ -210,8 +213,10 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
	struct generic_pm_domain *genpd = pd_to_genpd(pd);
	struct gpd_link *link;

	if (!genpd->max_off_time_changed)
	if (!genpd->max_off_time_changed) {
		genpd->state_idx = genpd->cached_power_down_state_idx;
		return genpd->cached_power_down_ok;
	}

	/*
	 * We have to invalidate the cached results for the masters, so
@@ -236,6 +241,7 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
		genpd->state_idx--;
	}

	genpd->cached_power_down_state_idx = genpd->state_idx;
	return genpd->cached_power_down_ok;
}

@@ -244,6 +250,65 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain)
	return false;
}

#ifdef CONFIG_CPU_IDLE
/*
 * cpu_power_down_ok - ->power_down_ok() callback for CPU PM domains.
 *
 * First runs the dev PM QoS validation via default_power_down_ok(), which
 * also selects an initial genpd->state_idx. For domains flagged with
 * GENPD_FLAG_CPU_DOMAIN it then estimates the idle duration as the time
 * from now until the earliest next_hrtimer of any online CPU in the
 * domain's cpumask, and searches downward from the QoS-selected state for
 * the deepest state whose residency plus power-off latency still fits.
 *
 * Returns true (with genpd->state_idx updated) if a suitable state was
 * found, false if powering the domain off is not worthwhile.
 */
static bool cpu_power_down_ok(struct dev_pm_domain *pd)
{
	struct generic_pm_domain *genpd = pd_to_genpd(pd);
	struct cpuidle_device *dev;
	ktime_t domain_wakeup, next_hrtimer;
	s64 idle_duration_ns;
	int cpu, i;

	/* Validate dev PM QoS constraints. */
	if (!default_power_down_ok(pd))
		return false;

	/* Non-CPU domains need no wakeup estimation beyond the QoS check. */
	if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
		return true;

	/*
	 * Find the next wakeup for any of the online CPUs within the PM domain
	 * and its subdomains. Note, we only need the genpd->cpus, as it already
	 * contains a mask of all CPUs from subdomains.
	 */
	domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
	for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
		dev = per_cpu(cpuidle_devices, cpu);
		if (dev) {
			/* Pairs with the WRITE_ONCE()s in cpuidle_enter(). */
			next_hrtimer = READ_ONCE(dev->next_hrtimer);
			if (ktime_before(next_hrtimer, domain_wakeup))
				domain_wakeup = next_hrtimer;
		}
	}

	/* The minimum idle duration is from now - until the next wakeup. */
	idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, ktime_get()));
	if (idle_duration_ns <= 0)
		return false;

	/*
	 * Find the deepest idle state that has its residency value satisfied
	 * and by also taking into account the power off latency for the state.
	 * Start at the state picked by the dev PM QoS constraint validation.
	 */
	i = genpd->state_idx;
	do {
		if (idle_duration_ns >= (genpd->states[i].residency_ns +
		    genpd->states[i].power_off_latency_ns)) {
			genpd->state_idx = i;
			return true;
		}
	} while (--i >= 0);

	return false;
}

/*
 * Governor for CPU PM domains: standard QoS-based suspend check plus the
 * CPU-wakeup-aware cpu_power_down_ok() for domain power off decisions.
 */
struct dev_power_governor pm_domain_cpu_gov = {
	.suspend_ok = default_suspend_ok,
	.power_down_ok = cpu_power_down_ok,
};
#endif

struct dev_power_governor simple_qos_governor = {
	.suspend_ok = default_suspend_ok,
	.power_down_ok = default_power_down_ok,
+1 −1
Original line number Diff line number Diff line
@@ -84,7 +84,7 @@ static struct cpuidle_driver exynos_idle_driver = {
		[1] = {
			.enter			= exynos_enter_lowpower,
			.exit_latency		= 300,
			.target_residency	= 100000,
			.target_residency	= 10000,
			.name			= "C1",
			.desc			= "ARM power down",
		},
+17 −2
Original line number Diff line number Diff line
@@ -328,9 +328,23 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		  int index)
{
	int ret = 0;

	/*
	 * Store the next hrtimer, which becomes either next tick or the next
	 * timer event, whatever expires first. Additionally, to make this data
	 * useful for consumers outside cpuidle, we rely on that the governor's
	 * ->select() callback have decided, whether to stop the tick or not.
	 */
	WRITE_ONCE(dev->next_hrtimer, tick_nohz_get_next_hrtimer());

	if (cpuidle_state_is_coupled(drv, index))
		return cpuidle_enter_state_coupled(dev, drv, index);
	return cpuidle_enter_state(dev, drv, index);
		ret = cpuidle_enter_state_coupled(dev, drv, index);
	else
		ret = cpuidle_enter_state(dev, drv, index);

	WRITE_ONCE(dev->next_hrtimer, 0);
	return ret;
}

/**
@@ -511,6 +525,7 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
{
	memset(dev->states_usage, 0, sizeof(dev->states_usage));
	dev->last_residency = 0;
	dev->next_hrtimer = 0;
}

/**
+1 −0
Original line number Diff line number Diff line
@@ -83,6 +83,7 @@ struct cpuidle_device {
	unsigned int		use_deepest_state:1;
	unsigned int		poll_time_limit:1;
	unsigned int		cpu;
	ktime_t			next_hrtimer;

	int			last_residency;
	struct cpuidle_state_usage	states_usage[CPUIDLE_STATE_MAX];
Loading