Commit d988c913 authored by Rafael J. Wysocki

Merge branch 'pm-cpufreq'

Merge cpufreq updates for 5.19-rc1:

 - Fix cpufreq governor cleanup code to avoid using kfree() directly
   to free kobject-based items (Kevin Hao); a generic sketch of the
   kobject release() pattern appears right after this list.

 - Prepare cpufreq for powerpc's asm/prom.h cleanup (Christophe Leroy).

 - Make intel_pstate notify frequency invariance code when no_turbo is
   turned on and off (Chen Yu).

 - Add Sapphire Rapids OOB mode support to intel_pstate (Srinivas
   Pandruvada).

 - Make cpufreq avoid unnecessary frequency updates due to mismatch
   between hardware and the frequency table (Viresh Kumar).

 - Make remove_cpu_dev_symlink() clear the real_cpus mask to simplify
   code (Viresh Kumar).

 - Rearrange cpufreq_offline() and cpufreq_remove_dev() to make the
   calling convention for some driver callbacks consistent (Rafael
   Wysocki).

 - Avoid accessing half-initialized cpufreq policies from the show()
   and store() sysfs functions (Schspa Shi).

 - Document which locks are held when the cpufreq interface functions
   are called, to make the locking expectations clear (Schspa Shi).

 - Update CPPC handling in cpufreq (Pierre Gondois):

   * Add per_cpu efficiency_class to the CPPC driver.
   * Make the CPPC driver register an EM based on efficiency class
     information.
   * Adjust _OSC for flexible address space in the ACPI platform
     initialization code and always set CPPC _OSC bits if CPPC_LIB is
     supported.
   * Assume no transition latency if no PCCT in the CPPC driver.
   * Add fast_switch and dvfs_possible_from_any_cpu support to the CPPC
     driver.
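
The kobject rule behind the first item above: an object that embeds a
struct kobject must be freed from the kobject's release() callback once
the last reference has been dropped with kobject_put(), never with a
bare kfree().  A minimal, generic sketch of that pattern (the my_tuners
names are illustrative stand-ins, not the actual dbs_data code):

    #include <linux/kobject.h>
    #include <linux/slab.h>

    /* Stand-in for kobject-based tuning data such as dbs_data. */
    struct my_tuners {
            struct kobject kobj;
            unsigned int sampling_rate;
    };

    /* Called by the kobject core when the last reference goes away. */
    static void my_tuners_release(struct kobject *kobj)
    {
            struct my_tuners *t = container_of(kobj, struct my_tuners, kobj);

            kfree(t);
    }

    static struct kobj_type my_tuners_ktype = {
            .release   = my_tuners_release,
            .sysfs_ops = &kobj_sysfs_ops,
    };

    static int my_tuners_add(struct my_tuners *t, struct kobject *parent)
    {
            return kobject_init_and_add(&t->kobj, &my_tuners_ktype,
                                        parent, "my_tuners");
    }

    /* Teardown: drop the reference instead of calling kfree() directly. */
    static void my_tuners_exit(struct my_tuners *t)
    {
            kobject_put(&t->kobj);
    }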

* pm-cpufreq:
  cpufreq: CPPC: Enable dvfs_possible_from_any_cpu
  cpufreq: CPPC: Enable fast_switch
  ACPI: CPPC: Assume no transition latency if no PCCT
  ACPI: bus: Set CPPC _OSC bits for all and when CPPC_LIB is supported
  ACPI: CPPC: Check _OSC for flexible address space
  cpufreq: make interface functions and lock holding state clear
  cpufreq: Abort show()/store() for half-initialized policies
  cpufreq: Rearrange locking in cpufreq_remove_dev()
  cpufreq: Split cpufreq_offline()
  cpufreq: Reorganize checks in cpufreq_offline()
  cpufreq: Clear real_cpus mask from remove_cpu_dev_symlink()
  cpufreq: intel_pstate: Support Sapphire Rapids OOB mode
  Revert "cpufreq: Fix possible race in cpufreq online error path"
  cpufreq: CPPC: Register EM based on efficiency class information
  cpufreq: CPPC: Add per_cpu efficiency_class
  cpufreq: Avoid unnecessary frequency updates due to mismatch
  cpufreq: Fix possible race in cpufreq online error path
  cpufreq: intel_pstate: Handle no_turbo in frequency invariance
  cpufreq: Prepare cleanup of powerpc's asm/prom.h
  cpufreq: governor: Use kobject release() method to free dbs_data
parents 16a23f39 2d41dc23
arch/arm64/kernel/smp.c (+1 −0)
@@ -512,6 +512,7 @@ struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
{
	return &cpu_madt_gicc[cpu];
}
EXPORT_SYMBOL_GPL(acpi_cpu_get_madt_gicc);

/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
drivers/acpi/bus.c (+26 −8)
@@ -278,6 +278,20 @@ bool osc_sb_apei_support_acked;
bool osc_pc_lpi_support_confirmed;
EXPORT_SYMBOL_GPL(osc_pc_lpi_support_confirmed);

/*
 * ACPI 6.2 Section 6.2.11.2 'Platform-Wide OSPM Capabilities':
 *   Starting with ACPI Specification 6.2, all _CPC registers can be in
 *   PCC, System Memory, System IO, or Functional Fixed Hardware address
 *   spaces. OSPM support for this more flexible register space scheme is
 *   indicated by the “Flexible Address Space for CPPC Registers” _OSC bit.
 *
 * Otherwise (cf ACPI 6.1, s8.4.7.1.1.X), _CPC registers must be in:
 * - PCC or Functional Fixed Hardware address space if defined
 * - SystemMemory address space (NULL register) if not defined
 */
bool osc_cpc_flexible_adr_space_confirmed;
EXPORT_SYMBOL_GPL(osc_cpc_flexible_adr_space_confirmed);

/*
 * ACPI 6.4 Operating System Capabilities for USB.
 */
@@ -315,12 +329,15 @@ static void acpi_bus_osc_negotiate_platform_control(void)
#endif
#ifdef CONFIG_X86
	capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GENERIC_INITIATOR_SUPPORT;
	if (boot_cpu_has(X86_FEATURE_HWP)) {
#endif

#ifdef CONFIG_ACPI_CPPC_LIB
	capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_SUPPORT;
	capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPCV2_SUPPORT;
	}
#endif

	capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_FLEXIBLE_ADR_SPACE;

	if (IS_ENABLED(CONFIG_SCHED_MC_PRIO))
		capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_CPC_DIVERSE_HIGH_SUPPORT;

@@ -341,8 +358,7 @@ static void acpi_bus_osc_negotiate_platform_control(void)
		return;
	}

#ifdef CONFIG_X86
	if (boot_cpu_has(X86_FEATURE_HWP))
#ifdef CONFIG_ACPI_CPPC_LIB
	osc_sb_cppc_not_supported = !(capbuf_ret[OSC_SUPPORT_DWORD] &
			(OSC_SB_CPC_SUPPORT | OSC_SB_CPCV2_SUPPORT));
#endif
@@ -366,6 +382,8 @@ static void acpi_bus_osc_negotiate_platform_control(void)
			capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_PCLPI_SUPPORT;
		osc_sb_native_usb4_support_confirmed =
			capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_NATIVE_USB4_SUPPORT;
		osc_cpc_flexible_adr_space_confirmed =
			capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_CPC_FLEXIBLE_ADR_SPACE;
	}

	kfree(context.ret.pointer);
drivers/acpi/cppc_acpi.c (+43 −1)
@@ -100,6 +100,16 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Check if a CPC register is in SystemMemory */
#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_SYSTEM_MEMORY)

/* Check if a CPC register is in SystemIo */
#define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&	\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_SYSTEM_IO)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id ==  ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
@@ -424,6 +434,24 @@ bool acpi_cpc_valid(void)
}
EXPORT_SYMBOL_GPL(acpi_cpc_valid);

bool cppc_allow_fast_switch(void)
{
	struct cpc_register_resource *desired_reg;
	struct cpc_desc *cpc_ptr;
	int cpu;

	for_each_possible_cpu(cpu) {
		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
		desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
		if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
				!CPC_IN_SYSTEM_IO(desired_reg))
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(cppc_allow_fast_switch);

/**
 * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
 * @cpu: Find all CPUs that share a domain with cpu.
@@ -736,6 +764,11 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
				if (gas_t->address) {
					void __iomem *addr;

					if (!osc_cpc_flexible_adr_space_confirmed) {
						pr_debug("Flexible address space capability not supported\n");
						goto out_free;
					}

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
@@ -758,6 +791,10 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
						 gas_t->address);
					goto out_free;
				}
				if (!osc_cpc_flexible_adr_space_confirmed) {
					pr_debug("Flexible address space capability not supported\n");
					goto out_free;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
@@ -1447,6 +1484,9 @@ EXPORT_SYMBOL_GPL(cppc_set_perf);
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 *
 * If desired_reg is in the SystemMemory or SystemIo ACPI address space,
 * then assume there is no latency.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
@@ -1472,7 +1512,9 @@ unsigned int cppc_get_transition_latency(int cpu_num)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (!CPC_IN_PCC(desired_reg))
	if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
		return 0;
	else if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
drivers/cpufreq/cppc_cpufreq.c (+211 −0)
@@ -389,6 +389,27 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
	return ret;
}

static unsigned int cppc_cpufreq_fast_switch(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cppc_cpudata *cpu_data = policy->driver_data;
	unsigned int cpu = policy->cpu;
	u32 desired_perf;
	int ret;

	desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
	cpu_data->perf_ctrls.desired_perf = desired_perf;
	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);

	if (ret) {
		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
			 cpu, ret);
		return 0;
	}

	return target_freq;
}

static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
@@ -420,12 +441,197 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}

static DEFINE_PER_CPU(unsigned int, efficiency_class);
static void cppc_cpufreq_register_em(struct cpufreq_policy *policy);

/* Create an artificial performance state every CPPC_EM_CAP_STEP capacity unit. */
#define CPPC_EM_CAP_STEP	(20)
/* Increase the cost value by CPPC_EM_COST_STEP every performance state. */
#define CPPC_EM_COST_STEP	(1)
/* Add a cost gap corresponding to the energy of 4 CPUs. */
#define CPPC_EM_COST_GAP	(4 * SCHED_CAPACITY_SCALE * CPPC_EM_COST_STEP \
				/ CPPC_EM_CAP_STEP)

static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
{
	struct cppc_perf_caps *perf_caps;
	unsigned int min_cap, max_cap;
	struct cppc_cpudata *cpu_data;
	int cpu = policy->cpu;

	cpu_data = policy->driver_data;
	perf_caps = &cpu_data->perf_caps;
	max_cap = arch_scale_cpu_capacity(cpu);
	min_cap = div_u64(max_cap * perf_caps->lowest_perf, perf_caps->highest_perf);
	if ((min_cap == 0) || (max_cap < min_cap))
		return 0;
	return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
}

/*
 * The cost is defined as:
 *   cost = power * max_frequency / frequency
 */
static inline unsigned long compute_cost(int cpu, int step)
{
	return CPPC_EM_COST_GAP * per_cpu(efficiency_class, cpu) +
			step * CPPC_EM_COST_STEP;
}

static int cppc_get_cpu_power(struct device *cpu_dev,
		unsigned long *power, unsigned long *KHz)
{
	unsigned long perf_step, perf_prev, perf, perf_check;
	unsigned int min_step, max_step, step, step_check;
	unsigned long prev_freq = *KHz;
	unsigned int min_cap, max_cap;
	struct cpufreq_policy *policy;

	struct cppc_perf_caps *perf_caps;
	struct cppc_cpudata *cpu_data;

	policy = cpufreq_cpu_get_raw(cpu_dev->id);
	cpu_data = policy->driver_data;
	perf_caps = &cpu_data->perf_caps;
	max_cap = arch_scale_cpu_capacity(cpu_dev->id);
	min_cap = div_u64(max_cap * perf_caps->lowest_perf,
			perf_caps->highest_perf);

	perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
	min_step = min_cap / CPPC_EM_CAP_STEP;
	max_step = max_cap / CPPC_EM_CAP_STEP;

	perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
	step = perf_prev / perf_step;

	if (step > max_step)
		return -EINVAL;

	if (min_step == max_step) {
		step = max_step;
		perf = perf_caps->highest_perf;
	} else if (step < min_step) {
		step = min_step;
		perf = perf_caps->lowest_perf;
	} else {
		step++;
		if (step == max_step)
			perf = perf_caps->highest_perf;
		else
			perf = step * perf_step;
	}

	*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
	perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
	step_check = perf_check / perf_step;

	/*
	 * To avoid bad integer approximation, check that new frequency value
	 * increased and that the new frequency will be converted to the
	 * desired step value.
	 */
	while ((*KHz == prev_freq) || (step_check != step)) {
		perf++;
		*KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
		perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
		step_check = perf_check / perf_step;
	}

	/*
	 * With an artificial EM, only the cost value is used. Still the power
	 * is populated such as 0 < power < EM_MAX_POWER. This allows to add
	 * more sense to the artificial performance states.
	 */
	*power = compute_cost(cpu_dev->id, step);

	return 0;
}

static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
		unsigned long *cost)
{
	unsigned long perf_step, perf_prev;
	struct cppc_perf_caps *perf_caps;
	struct cpufreq_policy *policy;
	struct cppc_cpudata *cpu_data;
	unsigned int max_cap;
	int step;

	policy = cpufreq_cpu_get_raw(cpu_dev->id);
	cpu_data = policy->driver_data;
	perf_caps = &cpu_data->perf_caps;
	max_cap = arch_scale_cpu_capacity(cpu_dev->id);

	perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, KHz);
	perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
	step = perf_prev / perf_step;

	*cost = compute_cost(cpu_dev->id, step);

	return 0;
}

static int populate_efficiency_class(void)
{
	struct acpi_madt_generic_interrupt *gicc;
	DECLARE_BITMAP(used_classes, 256) = {};
	int class, cpu, index;

	for_each_possible_cpu(cpu) {
		gicc = acpi_cpu_get_madt_gicc(cpu);
		class = gicc->efficiency_class;
		bitmap_set(used_classes, class, 1);
	}

	if (bitmap_weight(used_classes, 256) <= 1) {
		pr_debug("Efficiency classes are all equal (=%d). "
			"No EM registered", class);
		return -EINVAL;
	}

	/*
	 * Squeeze efficiency class values on [0:#efficiency_class-1].
	 * Values are per spec in [0:255].
	 */
	index = 0;
	for_each_set_bit(class, used_classes, 256) {
		for_each_possible_cpu(cpu) {
			gicc = acpi_cpu_get_madt_gicc(cpu);
			if (gicc->efficiency_class == class)
				per_cpu(efficiency_class, cpu) = index;
		}
		index++;
	}
	cppc_cpufreq_driver.register_em = cppc_cpufreq_register_em;

	return 0;
}

static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
{
	struct cppc_cpudata *cpu_data;
	struct em_data_callback em_cb =
		EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);

	cpu_data = policy->driver_data;
	em_dev_register_perf_domain(get_cpu_device(policy->cpu),
			get_perf_level_count(policy), &em_cb,
			cpu_data->shared_cpu_map, 0);
}

#else

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
	return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
static int populate_efficiency_class(void)
{
	return 0;
}
static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
{
}
#endif


@@ -536,6 +742,9 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
		goto out;
	}

	policy->fast_switch_possible = cppc_allow_fast_switch();
	policy->dvfs_possible_from_any_cpu = true;

	/*
	 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
	 * is supported.
@@ -681,6 +890,7 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
	.verify = cppc_verify_policy,
	.target = cppc_cpufreq_set_target,
	.get = cppc_cpufreq_get_rate,
	.fast_switch = cppc_cpufreq_fast_switch,
	.init = cppc_cpufreq_cpu_init,
	.exit = cppc_cpufreq_cpu_exit,
	.set_boost = cppc_cpufreq_set_boost,
@@ -742,6 +952,7 @@ static int __init cppc_cpufreq_init(void)

	cppc_check_hisi_workaround();
	cppc_freq_invariance_init();
	populate_efficiency_class();

	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
	if (ret)
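
For reference, a worked evaluation of the artificial EM cost constants
defined above (SCHED_CAPACITY_SCALE is 1024, integer division):

    CPPC_EM_COST_GAP = 4 * SCHED_CAPACITY_SCALE * CPPC_EM_COST_STEP / CPPC_EM_CAP_STEP
                     = 4 * 1024 * 1 / 20 = 204

Within a single efficiency class the cost varies by at most roughly
SCHED_CAPACITY_SCALE / CPPC_EM_CAP_STEP ≈ 51 units (one CPPC_EM_COST_STEP
per capacity step), so the cost ranges of different classes never overlap
and compute_cost() always reports a less efficient class as strictly more
expensive.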
drivers/cpufreq/cpufreq.c (+69 −43)
@@ -28,6 +28,7 @@
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <linux/units.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);
@@ -947,12 +948,13 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;
	ssize_t ret = -EBUSY;

	if (!fattr->show)
		return -EIO;

	down_read(&policy->rwsem);
	if (likely(!policy_is_inactive(policy)))
		ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

@@ -964,7 +966,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	ssize_t ret = -EBUSY;

	if (!fattr->store)
		return -EIO;
@@ -978,6 +980,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,

	if (cpu_online(policy->cpu)) {
		down_write(&policy->rwsem);
		if (likely(!policy_is_inactive(policy)))
			ret = fattr->store(policy, buf, count);
		up_write(&policy->rwsem);
	}
@@ -1019,11 +1022,12 @@ static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
		dev_err(dev, "cpufreq symlink creation failed\n");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu,
				   struct device *dev)
{
	dev_dbg(dev, "%s: Removing symlink\n", __func__);
	sysfs_remove_link(&dev->kobj, "cpufreq");
	cpumask_clear_cpu(cpu, policy->real_cpus);
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
@@ -1337,12 +1341,12 @@ static int cpufreq_online(unsigned int cpu)
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
		up_write(&policy->rwsem);
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
		down_write(&policy->rwsem);
	}

	if (!new_policy && cpufreq_driver->online) {
@@ -1382,7 +1386,6 @@ static int cpufreq_online(unsigned int cpu)
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	down_write(&policy->rwsem);
	/*
	 * affected cpus must always be the one, which are online. We aren't
	 * managing offline cpus here.
@@ -1531,9 +1534,9 @@ static int cpufreq_online(unsigned int cpu)

out_destroy_policy:
	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, get_cpu_device(j));
		remove_cpu_dev_symlink(policy, j, get_cpu_device(j));

	up_write(&policy->rwsem);
	cpumask_clear(policy->cpus);

out_offline_policy:
	if (cpufreq_driver->offline)
@@ -1544,6 +1547,8 @@ static int cpufreq_online(unsigned int cpu)
		cpufreq_driver->exit(policy);

out_free_policy:
	up_write(&policy->rwsem);

	cpufreq_policy_free(policy);
	return ret;
}
@@ -1575,47 +1580,36 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
	return 0;
}

static int cpufreq_offline(unsigned int cpu)
static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
{
	struct cpufreq_policy *policy;
	int ret;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_clear_cpu(cpu, policy->cpus);

	if (policy_is_inactive(policy)) {
		if (has_target())
			strncpy(policy->last_governor, policy->governor->name,
				CPUFREQ_NAME_LEN);
		else
			policy->last_policy = policy->policy;
	} else if (cpu == policy->cpu) {
		/* Nominate new CPU */
	if (!policy_is_inactive(policy)) {
		/* Nominate a new CPU if necessary. */
		if (cpu == policy->cpu)
			policy->cpu = cpumask_any(policy->cpus);
	}

	/* Start governor again for active policy */
	if (!policy_is_inactive(policy)) {
		/* Start the governor again for the active policy. */
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		goto unlock;
		return;
	}

	if (has_target())
		strncpy(policy->last_governor, policy->governor->name,
			CPUFREQ_NAME_LEN);
	else
		policy->last_policy = policy->policy;

	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
		cpufreq_cooling_unregister(policy->cdev);
		policy->cdev = NULL;
@@ -1634,8 +1628,24 @@ static int cpufreq_offline(unsigned int cpu)
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}
}

static int cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);

	__cpufreq_offline(cpu, policy);

unlock:
	up_write(&policy->rwsem);
	return 0;
}
@@ -1653,20 +1663,26 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
	if (!policy)
		return;

	down_write(&policy->rwsem);

	if (cpu_online(cpu))
		cpufreq_offline(cpu);
		__cpufreq_offline(cpu, policy);

	cpumask_clear_cpu(cpu, policy->real_cpus);
	remove_cpu_dev_symlink(policy, dev);
	remove_cpu_dev_symlink(policy, cpu, dev);

	if (!cpumask_empty(policy->real_cpus)) {
		up_write(&policy->rwsem);
		return;
	}

	if (cpumask_empty(policy->real_cpus)) {
	/* We did light-weight exit earlier, do full tear down now */
	if (cpufreq_driver->offline)
		cpufreq_driver->exit(policy);

	up_write(&policy->rwsem);

	cpufreq_policy_free(policy);
}
}

/**
 * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
@@ -1707,6 +1723,16 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
		return new_freq;

	if (policy->cur != new_freq) {
		/*
		 * For some platforms, the frequency returned by hardware may be
		 * slightly different from what is provided in the frequency
		 * table, for example hardware may return 499 MHz instead of 500
		 * MHz. In such cases it is better to avoid getting into
		 * unnecessary frequency updates.
		 */
		if (abs(policy->cur - new_freq) < HZ_PER_MHZ)
			return policy->cur;

		cpufreq_out_of_sync(policy, new_freq);
		if (update)
			schedule_work(&policy->update);