Unverified Commit 87a8a526 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!14179 [OLK-6.6] update ITMT support patch for Zhaoxin CPUs

Merge Pull Request from: @leoliu-oc 
 
Includes 3 patches:
1. 0001-Revert-cpufreq-ACPI-add-ITMT-support-when-CPPC-enabl.patch
2. 0002-cpufreq-ACPI-add-ITMT-support-when-CPPC-enabled.patch
3. 0003-cpufreq-acpi-cpufreq-Zhaoxin-fix-incorrect-max-freq-.patch

Revert the previous (old) patch, apply the updated patch version,
and fix the issue of the mismatched maximum frequency.

### Issue
https://gitee.com/openeuler/kernel/issues/IBB6OU

### Test
Pass.
 
 
Link: https://gitee.com/openeuler/kernel/pulls/14179

 

Reviewed-by: default avatarJason Zeng <jason.zeng@intel.com>
Signed-off-by: default avatarZhang Peng <zhangpeng362@huawei.com>
parents 3a9c0543 65940a8a
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -122,7 +122,7 @@ int sched_set_itmt_support(void)

	return 0;
}
EXPORT_SYMBOL_GPL(sched_set_itmt_support);
EXPORT_SYMBOL(sched_set_itmt_support);

/**
 * sched_clear_itmt_support() - Revoke platform's support of ITMT
@@ -182,4 +182,4 @@ void sched_set_itmt_core_prio(int prio, int cpu)
{
	per_cpu(sched_core_priority, cpu) = prio;
}
EXPORT_SYMBOL_GPL(sched_set_itmt_core_prio);
EXPORT_SYMBOL(sched_set_itmt_core_prio);
+50 −46
Original line number Diff line number Diff line
@@ -627,31 +627,25 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
}
#endif

#ifdef CONFIG_ACPI_CPPC_LIB
static bool cppc_highest_perf_diff;
static struct cpumask core_prior_mask;

static void cppc_get_highest_nominal_perf(int cpu, u64 *highest_perf, u64 *nominal_perf)
/* The work item is needed to avoid CPU hotplug locking issues */
static void sched_itmt_work_fn(struct work_struct *work)
{
	struct cppc_perf_caps perf_caps;
	int ret;

	ret = cppc_get_perf_caps(cpu, &perf_caps);
	if (ret) {
		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n", cpu, ret);
		return;
	sched_set_itmt_support();
}
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		*highest_perf = amd_get_highest_perf();
	else
		*highest_perf = perf_caps.highest_perf;

	*nominal_perf = perf_caps.nominal_perf;
static DECLARE_WORK(sched_itmt_work, sched_itmt_work_fn);

static void sched_set_itmt(void)
{
	schedule_work(&sched_itmt_work);
}

#ifdef CONFIG_ACPI_CPPC_LIB
static u64 get_max_boost_ratio(unsigned int cpu)
{
	struct cppc_perf_caps perf_caps;
	u64 highest_perf, nominal_perf;
	int ret;

	if (acpi_pstate_strict)
		return 0;
@@ -660,7 +654,19 @@ static u64 get_max_boost_ratio(unsigned int cpu)
	    boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR)
		return 0;

	cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf);
	ret = cppc_get_perf_caps(cpu, &perf_caps);
	if (ret) {
		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
			 cpu, ret);
		return 0;
	}

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		highest_perf = amd_get_highest_perf();
	else
		highest_perf = perf_caps.highest_perf;

	nominal_perf = perf_caps.nominal_perf;

	if (!highest_perf || !nominal_perf) {
		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
@@ -675,23 +681,22 @@ static u64 get_max_boost_ratio(unsigned int cpu)
	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}

/* The work item is needed to avoid CPU hotplug locking issues */
static void cpufreq_sched_itmt_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, cpufreq_sched_itmt_work_fn);
static bool cppc_highest_perf_diff;
static struct cpumask core_prio_cpumask;

static void cpufreq_set_itmt_prio(int cpu)
static void core_set_itmt_prio(int cpu)
{
	u64 highest_perf, nominal_perf;
	u64 highest_perf = 0;
	int ret = 0;
	static u64 max_highest_perf = 0, min_highest_perf = U64_MAX;

	cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf);

	ret = cppc_get_highest_perf(cpu, &highest_perf);
	if (ret) {
		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n", cpu, ret);
		return;
	}
	sched_set_itmt_core_prio(highest_perf, cpu);
	cpumask_set_cpu(cpu, &core_prior_mask);
	cpumask_set_cpu(cpu, &core_prio_cpumask);

	if (max_highest_perf <= min_highest_perf) {
		if (highest_perf > max_highest_perf)
@@ -700,25 +705,24 @@ static void cpufreq_set_itmt_prio(int cpu)
		if (highest_perf < min_highest_perf)
			min_highest_perf = highest_perf;

		if (max_highest_perf > min_highest_perf) {
			/*
			 * This code can be run during CPU online under the
			 * CPU hotplug locks, so sched_set_itmt_support()
			 * cannot be called from here.  Queue up a work item
			 * to invoke it.
			 */
		if (max_highest_perf > min_highest_perf)
			cppc_highest_perf_diff = true;
	}
	}

	if (cppc_highest_perf_diff && cpumask_equal(&core_prior_mask, cpu_online_mask)) {
		pr_debug("queue a work to set itmt enabled\n");
		schedule_work(&sched_itmt_work);
	if (cppc_highest_perf_diff && cpumask_equal(&core_prio_cpumask, cpu_online_mask)) {
		/*
		 * This code  can be run during CPU online under the CPU hotplug locks,
		 * so sched_set_itmt cannot be called from here.
		 * queue a work item to invoke it
		 */
		pr_debug("queue a work to set itmt support and enable\n");
		sched_set_itmt();
	}
}

#else
static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
static inline void cpufreq_set_itmt_prio(int cpu) { }
static void core_set_itmt_prio(int cpu) {}
#endif

static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -731,7 +735,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
	unsigned int valid_states = 0;
	unsigned int result = 0;
	u64 max_boost_ratio;
	unsigned int i, j;
	unsigned int i, j = 0;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif
@@ -795,10 +799,10 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
		pr_info_once("overriding BIOS provided _PSD data\n");
	}
#endif

	if (c->x86_vendor == X86_VENDOR_CENTAUR || c->x86_vendor == X86_VENDOR_ZHAOXIN) {
		for_each_cpu(j, policy->cpus) {
			cpufreq_set_itmt_prio(j);
		}
		for_each_cpu(j, policy->cpus)
			core_set_itmt_prio(j);
	}

	/* capability check */