Unverified Commit ca2555dd authored by openeuler-ci-bot, committed by Gitee

!13851 intel: backport intel_pstate driver update from 6.11

Merge Pull Request from: @jiayingbao 
 
Backport the intel_pstate driver update from kernel 6.11, including:
- allow model-specific EPPs and update the balance_performance EPP for SPR and EMR
- support the highest performance change interrupt
- other fixes and updates

Test:
The intel_pstate sysfs interface works as expected.
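For example, a minimal user-space check of the reworked no_turbo attribute (an illustrative sketch assuming the intel_pstate driver is active; not part of this patch set):

/* Sketch: read intel_pstate's no_turbo sysfs knob. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/intel_pstate/no_turbo";
	unsigned int no_turbo;
	FILE *f = fopen(path, "r");

	if (!f || fscanf(f, "%u", &no_turbo) != 1) {
		perror(path);
		return EXIT_FAILURE;
	}
	fclose(f);

	/* After this series, reads report global.no_turbo directly, and
	 * writing 0 while firmware disables turbo fails with EPERM. */
	printf("no_turbo = %u\n", no_turbo);
	return EXIT_SUCCESS;
}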

 
 
Link: https://gitee.com/openeuler/kernel/pulls/13851

 

Reviewed-by: Jason Zeng <jason.zeng@intel.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents 1071a285 49d825a6
arch/x86/include/asm/cpufeatures.h (+1 −0)
@@ -380,6 +380,7 @@
#define X86_FEATURE_HWP_ACT_WINDOW	(14*32+ 9) /* HWP Activity Window */
#define X86_FEATURE_HWP_EPP		(14*32+10) /* HWP Energy Perf. Preference */
#define X86_FEATURE_HWP_PKG_REQ		(14*32+11) /* HWP Package Level Request */
#define X86_FEATURE_HWP_HIGHEST_PERF_CHANGE (14*32+15) /* "" HWP Highest perf change */
#define X86_FEATURE_HFI			(14*32+19) /* Hardware Feedback Interface */

/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
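For context: word 14 of this table maps to CPUID leaf 0x6 (EAX), so the new flag corresponds to CPUID.06H:EAX bit 15. A user-space probe for the same capability could look like the following sketch (uses the compiler's <cpuid.h>; illustrative only):

/* Sketch: detect HWP highest-performance-change support from user space.
 * Bit 15 of CPUID leaf 0x6 EAX corresponds to the feature bit added above. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x6, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("HWP highest perf change: %s\n",
	       (eax & (1u << 15)) ? "supported" : "not supported");
	return 0;
}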
drivers/cpufreq/intel_pstate.c (+133 −123)
@@ -25,6 +25,7 @@
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <linux/pm_qos.h>
#include <linux/bitfield.h>
#include <trace/events/power.h>

#include <asm/cpu.h>
@@ -172,7 +173,6 @@ struct vid_data {
 *			based on the MSR_IA32_MISC_ENABLE value and whether or
 *			not the maximum reported turbo P-state is different from
 *			the maximum reported non-turbo one.
 * @turbo_disabled_mf:	The @turbo_disabled value reflected by cpuinfo.max_freq.
 * @min_perf_pct:	Minimum capacity limit in percent of the maximum turbo
 *			P-state capacity.
 * @max_perf_pct:	Maximum capacity limit in percent of the maximum turbo
@@ -181,7 +181,6 @@ struct vid_data {
struct global_params {
	bool no_turbo;
	bool turbo_disabled;
	bool turbo_disabled_mf;
	int max_perf_pct;
	int min_perf_pct;
};
@@ -201,8 +200,6 @@ struct global_params {
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO Wait time difference from last and
 *			current sample
 * @sample:		Storage for storing last Sample data
 * @min_perf_ratio:	Minimum capacity in terms of PERF or HWP ratios
 * @max_perf_ratio:	Maximum capacity in terms of PERF or HWP ratios
@@ -214,7 +211,7 @@ struct global_params {
 * @epp_policy:		Last saved policy used to set EPP/EPB
 * @epp_default:	Power on default HWP energy performance
 *			preference/bias
 * @epp_cached		Cached HWP energy-performance preference value
 * @epp_cached:		Cached HWP energy-performance preference value
 * @hwp_req_cached:	Cached value of the last HWP Request MSR
 * @hwp_cap_cached:	Cached value of the last HWP Capabilities MSR
 * @last_io_update:	Last time when IO wake flag was set
@@ -241,7 +238,6 @@ struct cpudata {
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	u64	prev_cummulative_iowait;
	struct sample sample;
	int32_t	min_perf_ratio;
	int32_t	max_perf_ratio;
@@ -294,11 +290,11 @@ struct pstate_funcs {

static struct pstate_funcs pstate_funcs __read_mostly;

static int hwp_active __read_mostly;
static int hwp_mode_bdw __read_mostly;
static bool per_cpu_limits __read_mostly;
static bool hwp_active __ro_after_init;
static int hwp_mode_bdw __ro_after_init;
static bool per_cpu_limits __ro_after_init;
static bool hwp_forced __ro_after_init;
static bool hwp_boost __read_mostly;
static bool hwp_forced __read_mostly;

static struct cpufreq_driver *intel_pstate_driver __read_mostly;

@@ -592,12 +588,13 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
	cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
}

static inline void update_turbo_state(void)
static bool turbo_is_disabled(void)
{
	u64 misc_en;

	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	global.turbo_disabled = misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE;

	return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
}

static int min_perf_pct_min(void)
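turbo_is_disabled() now just reads MSR_IA32_MISC_ENABLE (0x1a0) and returns the state of the turbo-disable bit (bit 38) instead of writing global state as a side effect. The same check can be reproduced from user space through the msr driver (a sketch assuming CONFIG_X86_MSR and root privileges; illustrative only):

/* Sketch: replicate turbo_is_disabled() via /dev/cpu/0/msr. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t misc_en;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	/* The msr driver maps the MSR number to the file offset. */
	if (fd < 0 || pread(fd, &misc_en, sizeof(misc_en), 0x1a0) != sizeof(misc_en)) {
		perror("msr");
		return 1;
	}
	close(fd);

	printf("turbo disabled by firmware: %s\n",
	       (misc_en & (1ULL << 38)) ? "yes" : "no");
	return 0;
}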
@@ -1152,12 +1149,16 @@ static void intel_pstate_update_policies(void)
static void __intel_pstate_update_max_freq(struct cpudata *cpudata,
					   struct cpufreq_policy *policy)
{
	policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
	if (hwp_active)
		intel_pstate_get_hwp_cap(cpudata);

	policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
			cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;

	refresh_frequency_limits(policy);
}

static void intel_pstate_update_max_freq(unsigned int cpu)
static void intel_pstate_update_limits(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

@@ -1169,25 +1170,12 @@ static void intel_pstate_update_max_freq(unsigned int cpu)
	cpufreq_cpu_release(policy);
}

static void intel_pstate_update_limits(unsigned int cpu)
{
	mutex_lock(&intel_pstate_driver_lock);

	update_turbo_state();
	/*
	 * If turbo has been turned on or off globally, policy limits for
	 * all CPUs need to be updated to reflect that.
	 */
	if (global.turbo_disabled_mf != global.turbo_disabled) {
		global.turbo_disabled_mf = global.turbo_disabled;
		arch_set_max_freq_ratio(global.turbo_disabled);
		for_each_possible_cpu(cpu)
			intel_pstate_update_max_freq(cpu);
	} else {
		cpufreq_update_policy(cpu);
	}

	mutex_unlock(&intel_pstate_driver_lock);
}
static void intel_pstate_update_limits_for_all(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		intel_pstate_update_limits(cpu);
}

/************************** sysfs begin ************************/
@@ -1285,10 +1273,6 @@ static ssize_t show_no_turbo(struct kobject *kobj,
		return -EAGAIN;
	}

	update_turbo_state();
	if (global.turbo_disabled)
		ret = sprintf(buf, "%u\n", global.turbo_disabled);
	else
	ret = sprintf(buf, "%u\n", global.no_turbo);

	mutex_unlock(&intel_pstate_driver_lock);
@@ -1300,32 +1284,39 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	bool no_turbo;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
	if (sscanf(buf, "%u", &input) != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_driver_lock);

	if (!intel_pstate_driver) {
		mutex_unlock(&intel_pstate_driver_lock);
		return -EAGAIN;
		count = -EAGAIN;
		goto unlock_driver;
	}

	mutex_lock(&intel_pstate_limits_lock);
	no_turbo = !!clamp_t(int, input, 0, 1);

	update_turbo_state();
	if (global.turbo_disabled) {
		pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
		mutex_unlock(&intel_pstate_limits_lock);
		mutex_unlock(&intel_pstate_driver_lock);
		return -EPERM;
	WRITE_ONCE(global.turbo_disabled, turbo_is_disabled());
	if (global.turbo_disabled && !no_turbo) {
		pr_notice("Turbo disabled by BIOS or unavailable on processor\n");
		count = -EPERM;
		if (global.no_turbo)
			goto unlock_driver;
		else
			no_turbo = 1;
	}

	global.no_turbo = clamp_t(int, input, 0, 1);

	if (global.no_turbo) {
	if (no_turbo == global.no_turbo)
		goto unlock_driver;

	WRITE_ONCE(global.no_turbo, no_turbo);

	mutex_lock(&intel_pstate_limits_lock);

	if (no_turbo) {
		struct cpudata *cpu = all_cpu_data[0];
		int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;

@@ -1336,9 +1327,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,

	mutex_unlock(&intel_pstate_limits_lock);

	intel_pstate_update_policies();
	arch_set_max_freq_ratio(global.no_turbo);
	intel_pstate_update_limits_for_all();
	arch_set_max_freq_ratio(no_turbo);

unlock_driver:
	mutex_unlock(&intel_pstate_driver_lock);

	return count;
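One subtlety of the rewritten store path: when firmware disables turbo and the user writes 0, the store forces no_turbo on anyway and still reports -EPERM. The decision flow, modeled as a pure function (an illustrative sketch, not driver code; locking and sysfs plumbing omitted):

/* Sketch: decision flow of the reworked store_no_turbo(). */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int store_no_turbo_model(unsigned int input, bool turbo_disabled,
				bool *no_turbo)
{
	bool req = !!input;	/* clamp_t(int, input, 0, 1) */
	int ret = 0;

	if (turbo_disabled && !req) {
		ret = -EPERM;		/* firmware forbids enabling turbo */
		if (*no_turbo)
			return ret;	/* already off, nothing to commit */
		req = true;		/* force no_turbo on anyway */
	}

	if (req == *no_turbo)
		return ret;		/* no change, skip the update fan-out */

	*no_turbo = req;		/* WRITE_ONCE(global.no_turbo, no_turbo) */
	return ret;
}

int main(void)
{
	bool no_turbo = false;

	/* Asking for turbo while firmware disables it: */
	int ret = store_no_turbo_model(0, true, &no_turbo);

	printf("ret=%d no_turbo=%d\n", ret, no_turbo);	/* ret=-1 (-EPERM) no_turbo=1 */
	return 0;
}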
@@ -1619,7 +1611,6 @@ static void intel_pstate_notify_work(struct work_struct *work)
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpudata->cpu);

	if (policy) {
		intel_pstate_get_hwp_cap(cpudata);
		__intel_pstate_update_max_freq(cpudata, policy);

		cpufreq_cpu_release(policy);
@@ -1631,18 +1622,24 @@ static void intel_pstate_notify_work(struct work_struct *work)
static DEFINE_RAW_SPINLOCK(hwp_notify_lock);
static cpumask_t hwp_intr_enable_mask;

#define HWP_GUARANTEED_PERF_CHANGE_STATUS      BIT(0)
#define HWP_HIGHEST_PERF_CHANGE_STATUS         BIT(3)

void notify_hwp_interrupt(void)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpudata *cpudata;
	u64 value;
	u64 value, status_mask;
	unsigned long flags;

	if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
	if (!hwp_active || !cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
		return;

	status_mask = HWP_GUARANTEED_PERF_CHANGE_STATUS;
	if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
		status_mask |= HWP_HIGHEST_PERF_CHANGE_STATUS;

	rdmsrl_safe(MSR_HWP_STATUS, &value);
	if (!(value & 0x01))
	if (!(value & status_mask))
		return;

	raw_spin_lock_irqsave(&hwp_notify_lock, flags);
@@ -1650,24 +1647,8 @@ void notify_hwp_interrupt(void)
	if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
		goto ack_intr;

	/*
	 * Currently we never free all_cpu_data. And we can't reach here
	 * without this allocated. But for safety for future changes, added
	 * check.
	 */
	if (unlikely(!READ_ONCE(all_cpu_data)))
		goto ack_intr;

	/*
	 * The free is done during cleanup, when cpufreq registry is failed.
	 * We wouldn't be here if it fails on init or switch status. But for
	 * future changes, added check.
	 */
	cpudata = READ_ONCE(all_cpu_data[this_cpu]);
	if (unlikely(!cpudata))
		goto ack_intr;

	schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));
	schedule_delayed_work(&all_cpu_data[this_cpu]->hwp_notify_work,
			msecs_to_jiffies(10));

	raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);

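The handler now tests two MSR_HWP_STATUS bits instead of the hard-coded 0x01: bit 0 (guaranteed performance change) and, on capable CPUs, bit 3 (highest performance change). A sketch of the new predicate (illustrative; mirrors the mask logic above):

/* Sketch: which HWP status values now trigger the notify work. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HWP_GUARANTEED_PERF_CHANGE_STATUS	(1ULL << 0)
#define HWP_HIGHEST_PERF_CHANGE_STATUS		(1ULL << 3)

static bool hwp_status_wants_work(uint64_t status, bool highest_perf_change_cap)
{
	uint64_t mask = HWP_GUARANTEED_PERF_CHANGE_STATUS;

	if (highest_perf_change_cap)
		mask |= HWP_HIGHEST_PERF_CHANGE_STATUS;

	return status & mask;	/* the old code tested bit 0 only */
}

int main(void)
{
	/* A highest-performance change is seen only on capable CPUs: */
	printf("%d\n", hwp_status_wants_work(1ULL << 3, true));	/* 1 */
	printf("%d\n", hwp_status_wants_work(1ULL << 3, false));	/* 0 */
	return 0;
}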
@@ -1681,23 +1662,30 @@ void notify_hwp_interrupt(void)
static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
{
	unsigned long flags;
	bool cancel_work;

	if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
	if (!cpu_feature_enabled(X86_FEATURE_HWP_NOTIFY))
		return;

	/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
	wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	raw_spin_lock_irqsave(&hwp_notify_lock, flags);
	if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
		cancel_delayed_work(&cpudata->hwp_notify_work);
	cancel_work = cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask);
	raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);

	if (cancel_work)
		cancel_delayed_work_sync(&cpudata->hwp_notify_work);
}

#define HWP_GUARANTEED_PERF_CHANGE_REQ BIT(0)
#define HWP_HIGHEST_PERF_CHANGE_REQ    BIT(2)

static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
{
	/* Enable HWP notification interrupt for guaranteed performance change */
	/* Enable HWP notification interrupt for performance change */
	if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
		u64 interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;
		unsigned long flags;

		raw_spin_lock_irqsave(&hwp_notify_lock, flags);
@@ -1705,8 +1693,11 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
		cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
		raw_spin_unlock_irqrestore(&hwp_notify_lock, flags);

		if (cpu_feature_enabled(X86_FEATURE_HWP_HIGHEST_PERF_CHANGE))
			interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;

		/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, interrupt_mask);
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
	}
}
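Note the asymmetry between the two MSRs: the highest-performance-change interrupt is requested through MSR_HWP_INTERRUPT bit 2 but reported through MSR_HWP_STATUS bit 3. A sketch of the enable mask now written instead of the old hard-coded 0x01 (illustrative; the capability is assumed for the demo):

/* Sketch: build the MSR_HWP_INTERRUPT enable mask. */
#include <stdint.h>
#include <stdio.h>

#define HWP_GUARANTEED_PERF_CHANGE_REQ	(1ULL << 0)
#define HWP_HIGHEST_PERF_CHANGE_REQ	(1ULL << 2)

int main(void)
{
	int has_highest_perf_change = 1;	/* assumed capability */
	uint64_t interrupt_mask = HWP_GUARANTEED_PERF_CHANGE_REQ;

	if (has_highest_perf_change)
		interrupt_mask |= HWP_HIGHEST_PERF_CHANGE_REQ;

	/* 0x1 on older CPUs, 0x5 when the highest-perf interrupt is on. */
	printf("MSR_HWP_INTERRUPT <- 0x%llx\n",
	       (unsigned long long)interrupt_mask);
	return 0;
}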
@@ -1715,13 +1706,6 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
{
	cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);

	/*
	 * If this CPU gen doesn't call for change in balance_perf
	 * EPP return.
	 */
	if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
		return;

	/*
	 * If the EPP is set by firmware, which means that firmware enabled HWP
	 * - Is equal or less than 0x80 (default balance_perf EPP)
@@ -1734,6 +1718,13 @@ static void intel_pstate_update_epp_defaults(struct cpudata *cpudata)
		return;
	}

	/*
	 * If this CPU gen doesn't call for change in balance_perf
	 * EPP return.
	 */
	if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE)
		return;

	/*
	 * Use hard coded value per gen to update the balance_perf
	 * and default EPP.
@@ -1789,7 +1780,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
	u32 vid;

	val = (u64)pstate << 8;
	if (global.no_turbo && !global.turbo_disabled)
	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
@@ -1954,7 +1945,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
	u64 val;

	val = (u64)pstate << 8;
	if (global.no_turbo && !global.turbo_disabled)
	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
		val |= (u64)1 << 32;

	return val;
@@ -2027,14 +2018,6 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
	int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);

	update_turbo_state();
	intel_pstate_set_pstate(cpu, pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
@@ -2260,7 +2243,7 @@ static inline int32_t get_target_pstate(struct cpudata *cpu)

	sample->busy_scaled = busy_frac * 100;

	target = global.no_turbo || global.turbo_disabled ?
	target = READ_ONCE(global.no_turbo) ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	target += target >> 2;
	target = mul_fp(target, busy_frac);
@@ -2304,8 +2287,6 @@ static void intel_pstate_adjust_pstate(struct cpudata *cpu)
	struct sample *sample;
	int target_pstate;

	update_turbo_state();

	target_pstate = get_target_pstate(cpu);
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
@@ -2430,10 +2411,12 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	X86_MATCH(ICELAKE_X,		core_funcs),
	X86_MATCH(TIGERLAKE,		core_funcs),
	X86_MATCH(SAPPHIRERAPIDS_X,	core_funcs),
	X86_MATCH(EMERALDRAPIDS_X,      core_funcs),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

#ifdef CONFIG_ACPI
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
	X86_MATCH(BROADWELL_D,		core_funcs),
	X86_MATCH(BROADWELL_X,		core_funcs),
@@ -2442,6 +2425,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
	X86_MATCH(SAPPHIRERAPIDS_X,	core_funcs),
	{}
};
#endif

static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
	X86_MATCH(KABYLAKE,		core_funcs),
@@ -2523,7 +2507,7 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)

static int intel_pstate_get_max_freq(struct cpudata *cpu)
{
	return global.turbo_disabled || global.no_turbo ?
	return READ_ONCE(global.no_turbo) ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
}

@@ -2608,12 +2592,14 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
	intel_pstate_update_perf_limits(cpu, policy->min, policy->max);

	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
		int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);

		/*
		 * NOHZ_FULL CPUs need this as the governor callback may not
		 * be invoked on them.
		 */
		intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_max_within_limits(cpu);
		intel_pstate_set_pstate(cpu, pstate);
	} else {
		intel_pstate_set_update_util_hook(policy->cpu);
	}
@@ -2656,10 +2642,9 @@ static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
{
	int max_freq;

	update_turbo_state();
	if (hwp_active) {
		intel_pstate_get_hwp_cap(cpu);
		max_freq = global.no_turbo || global.turbo_disabled ?
		max_freq = READ_ONCE(global.no_turbo) ?
				cpu->pstate.max_freq : cpu->pstate.turbo_freq;
	} else {
		max_freq = intel_pstate_get_max_freq(cpu);
@@ -2753,9 +2738,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_freq;
	update_turbo_state();
	global.turbo_disabled_mf = global.turbo_disabled;
	policy->cpuinfo.max_freq = global.turbo_disabled ?
	policy->cpuinfo.max_freq = READ_ONCE(global.no_turbo) ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;

	policy->min = policy->cpuinfo.min_freq;
@@ -2920,8 +2903,6 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
	struct cpufreq_freqs freqs;
	int target_pstate;

	update_turbo_state();

	freqs.old = policy->cur;
	freqs.new = target_freq;

@@ -2943,8 +2924,6 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int target_pstate;

	update_turbo_state();

	target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);

	target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
@@ -2962,8 +2941,8 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
	int old_pstate = cpu->pstate.current_pstate;
	int cap_pstate, min_pstate, max_pstate, target_pstate;

	update_turbo_state();
	cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
	cap_pstate = READ_ONCE(global.no_turbo) ?
					HWP_GUARANTEED_PERF(hwp_cap) :
					HWP_HIGHEST_PERF(hwp_cap);

	/* Optimization: Avoid unnecessary divisions. */
@@ -3132,10 +3111,8 @@ static void intel_pstate_driver_cleanup(void)
			if (intel_pstate_driver == &intel_pstate)
				intel_pstate_clear_update_util_hook(cpu);

			raw_spin_lock(&hwp_notify_lock);
			kfree(all_cpu_data[cpu]);
			WRITE_ONCE(all_cpu_data[cpu], NULL);
			raw_spin_unlock(&hwp_notify_lock);
		}
	}
	cpus_read_unlock();
@@ -3152,6 +3129,10 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)

	memset(&global, 0, sizeof(global));
	global.max_perf_pct = 100;
	global.turbo_disabled = turbo_is_disabled();
	global.no_turbo = global.turbo_disabled;

	arch_set_max_freq_ratio(global.turbo_disabled);

	intel_pstate_driver = driver;
	ret = cpufreq_register_driver(intel_pstate_driver);
@@ -3402,14 +3383,30 @@ static bool intel_pstate_hwp_is_enabled(void)
	return !!(value & 0x1);
}

static const struct x86_cpu_id intel_epp_balance_perf[] = {
#define POWERSAVE_MASK			GENMASK(7, 0)
#define BALANCE_POWER_MASK		GENMASK(15, 8)
#define BALANCE_PERFORMANCE_MASK	GENMASK(23, 16)
#define PERFORMANCE_MASK		GENMASK(31, 24)

#define HWP_SET_EPP_VALUES(powersave, balance_power, balance_perf, performance) \
	(FIELD_PREP_CONST(POWERSAVE_MASK, powersave) |\
	 FIELD_PREP_CONST(BALANCE_POWER_MASK, balance_power) |\
	 FIELD_PREP_CONST(BALANCE_PERFORMANCE_MASK, balance_perf) |\
	 FIELD_PREP_CONST(PERFORMANCE_MASK, performance))

#define HWP_SET_DEF_BALANCE_PERF_EPP(balance_perf) \
	(HWP_SET_EPP_VALUES(HWP_EPP_POWERSAVE, HWP_EPP_BALANCE_POWERSAVE,\
	 balance_perf, HWP_EPP_PERFORMANCE))

static const struct x86_cpu_id intel_epp_default[] = {
	/*
	 * Set EPP value as 102, this is the max suggested EPP
	 * which can result in one core turbo frequency for
	 * AlderLake Mobile CPUs.
	 */
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 102),
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 32),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, HWP_SET_DEF_BALANCE_PERF_EPP(102)),
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, HWP_SET_DEF_BALANCE_PERF_EPP(32)),
	{}
};
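The driver_data word now carries all four EPP defaults, one byte per preference. A user-space sketch of the packing performed by HWP_SET_DEF_BALANCE_PERF_EPP(), using plain shifts in place of FIELD_PREP_CONST (HWP_EPP_* constants as defined in msr-index.h; illustrative only):

/* Sketch: pack four EPP bytes the way the macros above do. */
#include <stdint.h>
#include <stdio.h>

#define HWP_EPP_POWERSAVE		0xff
#define HWP_EPP_BALANCE_POWERSAVE	0xc0
#define HWP_EPP_PERFORMANCE		0x00

static uint32_t pack_epps(uint8_t powersave, uint8_t balance_power,
			  uint8_t balance_perf, uint8_t performance)
{
	return (uint32_t)powersave |		/* POWERSAVE_MASK, bits 7:0 */
	       ((uint32_t)balance_power << 8) |	/* BALANCE_POWER_MASK */
	       ((uint32_t)balance_perf << 16) |	/* BALANCE_PERFORMANCE_MASK */
	       ((uint32_t)performance << 24);	/* PERFORMANCE_MASK */
}

int main(void)
{
	/* Equivalent of HWP_SET_DEF_BALANCE_PERF_EPP(32) for SPR/EMR: */
	uint32_t v = pack_epps(HWP_EPP_POWERSAVE, HWP_EPP_BALANCE_POWERSAVE,
			       32, HWP_EPP_PERFORMANCE);

	printf("driver_data = 0x%08x\n", (unsigned int)v);	/* 0x0020c0ff */
	printf("balance_perf = %u\n",
	       (unsigned int)((v >> 16) & 0xff));		/* FIELD_GET */
	return 0;
}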

@@ -3441,7 +3438,7 @@ static int __init intel_pstate_init(void)
		 * deal with it.
		 */
		if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
			WRITE_ONCE(hwp_active, 1);
			hwp_active = true;
			hwp_mode_bdw = id->driver_data;
			intel_pstate.attr = hwp_cpufreq_attrs;
			intel_cpufreq.attr = hwp_cpufreq_attrs;
@@ -3502,10 +3499,23 @@ static int __init intel_pstate_init(void)
	intel_pstate_sysfs_expose_params();

	if (hwp_active) {
		const struct x86_cpu_id *id = x86_match_cpu(intel_epp_balance_perf);
		const struct x86_cpu_id *id = x86_match_cpu(intel_epp_default);

		if (id)
			epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = id->driver_data;
		if (id) {
			epp_values[EPP_INDEX_POWERSAVE] =
				FIELD_GET(POWERSAVE_MASK, id->driver_data);
			epp_values[EPP_INDEX_BALANCE_POWERSAVE] =
				FIELD_GET(BALANCE_POWER_MASK, id->driver_data);
			epp_values[EPP_INDEX_BALANCE_PERFORMANCE] =
				FIELD_GET(BALANCE_PERFORMANCE_MASK, id->driver_data);
			epp_values[EPP_INDEX_PERFORMANCE] =
				FIELD_GET(PERFORMANCE_MASK, id->driver_data);
			pr_debug("Updated EPPs powersave:%x balanced power:%x balanced perf:%x performance:%x\n",
				epp_values[EPP_INDEX_POWERSAVE],
				epp_values[EPP_INDEX_BALANCE_POWERSAVE],
				epp_values[EPP_INDEX_BALANCE_PERFORMANCE],
				epp_values[EPP_INDEX_PERFORMANCE]);
		}
	}

	mutex_lock(&intel_pstate_driver_lock);