Commit 88e9c0bf authored by Rafael J. Wysocki's avatar Rafael J. Wysocki
Browse files

Merge branches 'pm-cpufreq', 'pm-cpu' and 'pm-em'

* pm-cpufreq:
  cpufreq: intel_pstate: Process HWP Guaranteed change notification
  thermal: intel: Allow processing of HWP interrupt
  cpufreq: schedutil: Use kobject release() method to free sugov_tunables
  cpufreq: Replace deprecated CPU-hotplug functions

* pm-cpu:
  notifier: Remove atomic_notifier_call_chain_robust()
  PM: cpu: Make notifier chain use a raw_spinlock_t

* pm-em:
  PM: EM: Increase energy calculation precision
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -163,9 +163,9 @@ static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
	if (ret || val > 1)
		return -EINVAL;

	get_online_cpus();
	cpus_read_lock();
	set_boost(policy, val);
	put_online_cpus();
	cpus_read_unlock();

	return count;
}
+3 −3
Original line number Diff line number Diff line
@@ -2654,18 +2654,18 @@ int cpufreq_boost_trigger_state(int state)
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	get_online_cpus();
	cpus_read_lock();
	for_each_active_policy(policy) {
		ret = cpufreq_driver->set_boost(policy, state);
		if (ret)
			goto err_reset_state;
	}
	put_online_cpus();
	cpus_read_unlock();

	return 0;

err_reset_state:
	put_online_cpus();
	cpus_read_unlock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = !state;
+2 −2
Original line number Diff line number Diff line
@@ -418,7 +418,7 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
	default_powersave_bias = powersave_bias;
	cpumask_clear(&done);

	get_online_cpus();
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct policy_dbs_info *policy_dbs;
@@ -442,7 +442,7 @@ static void od_set_powersave_bias(unsigned int powersave_bias)
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	put_online_cpus();
	cpus_read_unlock();
}

void od_register_powersave_bias_handler(unsigned int (*f)
+41 −2
Original line number Diff line number Diff line
@@ -32,6 +32,7 @@
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
#include "../drivers/thermal/intel/thermal_interrupt.h"

#define INTEL_PSTATE_SAMPLING_INTERVAL	(10 * NSEC_PER_MSEC)

@@ -219,6 +220,7 @@ struct global_params {
 * @sched_flags:	Store scheduler flags for possible cross CPU update
 * @hwp_boost_min:	Last HWP boosted min performance
 * @suspended:		Whether or not the driver has been suspended.
 * @hwp_notify_work:	workqueue for HWP notifications.
 *
 * This structure stores per CPU instance data for all CPUs.
 */
@@ -257,6 +259,7 @@ struct cpudata {
	unsigned int sched_flags;
	u32 hwp_boost_min;
	bool suspended;
	struct delayed_work hwp_notify_work;
};

static struct cpudata **all_cpu_data;
@@ -1625,6 +1628,40 @@ static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)

/************************** sysfs end ************************/

/*
 * Deferred (process-context) handler for an HWP notification interrupt.
 *
 * Scheduled by notify_hwp_interrupt() via schedule_delayed_work_on() on the
 * CPU that took the interrupt, so smp_processor_id() here identifies that
 * CPU.  Re-evaluates the CPU's cpufreq policy under intel_pstate_driver_lock
 * and then clears MSR_HWP_STATUS to acknowledge the event.
 *
 * NOTE(review): wrmsrl() acts on the local CPU; this relies on the work item
 * staying pinned to the interrupted CPU — confirm preemption/migration cannot
 * move it before the MSR write.
 */
static void intel_pstate_notify_work(struct work_struct *work)
{
	mutex_lock(&intel_pstate_driver_lock);
	cpufreq_update_policy(smp_processor_id());
	/* Clear the HWP status bits so further notifications can be raised. */
	wrmsrl(MSR_HWP_STATUS, 0);
	mutex_unlock(&intel_pstate_driver_lock);
}

/*
 * Entry point called from the thermal interrupt code when an HWP
 * notification interrupt fires (see thermal_interrupt.h include above).
 *
 * Bails out unless HWP is active and the CPU advertises HWP notification
 * support.  Reads MSR_HWP_STATUS and, if bit 0 is set (a guaranteed-
 * performance change, per the interrupt enabled in
 * intel_pstate_enable_hwp_interrupt()), defers the policy update to
 * process context via a delayed work item pinned to this CPU.
 *
 * NOTE(review): all_cpu_data[this_cpu] is dereferenced without a NULL
 * check — verify an interrupt cannot arrive before this CPU's cpudata is
 * allocated (or after driver cleanup frees it).
 */
void notify_hwp_interrupt(void)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpudata *cpudata;
	u64 value;

	if (!hwp_active || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
		return;

	rdmsrl(MSR_HWP_STATUS, value);
	/* Bit 0: guaranteed performance change; nothing to do if clear. */
	if (!(value & 0x01))
		return;

	cpudata = all_cpu_data[this_cpu];
	/* Delay 10ms to coalesce bursts of notifications from hardware. */
	schedule_delayed_work_on(this_cpu, &cpudata->hwp_notify_work, msecs_to_jiffies(10));
}

/*
 * Arm the HWP notification interrupt for @cpudata's CPU, if supported.
 *
 * Initializes the per-CPU delayed work before writing MSR_HWP_INTERRUPT,
 * so an interrupt firing immediately after the wrmsrl_on_cpu() finds the
 * work item ready to be scheduled (see notify_hwp_interrupt()).
 * Writing 0x01 enables notification on guaranteed performance change.
 */
static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
{
	/* Enable HWP notification interrupt for guaranteed performance change */
	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
		INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
	}
}

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupt as we don't process them */
@@ -1634,6 +1671,8 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
	if (cpudata->epp_default == -EINVAL)
		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);

	intel_pstate_enable_hwp_interrupt(cpudata);
}

static int atom_get_min_pstate(void)
@@ -2969,7 +3008,7 @@ static void intel_pstate_driver_cleanup(void)
{
	unsigned int cpu;

	get_online_cpus();
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			if (intel_pstate_driver == &intel_pstate)
@@ -2979,7 +3018,7 @@ static void intel_pstate_driver_cleanup(void)
			all_cpu_data[cpu] = NULL;
		}
	}
	put_online_cpus();
	cpus_read_unlock();

	intel_pstate_driver = NULL;
}
+3 −3
Original line number Diff line number Diff line
@@ -1180,7 +1180,7 @@ static int powernowk8_init(void)
	if (!x86_match_cpu(powernow_k8_ids))
		return -ENODEV;

	get_online_cpus();
	cpus_read_lock();
	for_each_online_cpu(i) {
		smp_call_function_single(i, check_supported_cpu, &ret, 1);
		if (!ret)
@@ -1188,10 +1188,10 @@ static int powernowk8_init(void)
	}

	if (supported_cpus != num_online_cpus()) {
		put_online_cpus();
		cpus_read_unlock();
		return -ENODEV;
	}
	put_online_cpus();
	cpus_read_unlock();

	ret = cpufreq_register_driver(&cpufreq_amd64_driver);
	if (ret)
Loading