Unverified Commit e76d7e52 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!9157 [OLK-6.6] cpufreq: ACPI: add ITMT support when CPPC enabled

Merge Pull Request from: @leoliu-oc 
 
The _CPC method can get per-core highest frequency.
The highest frequency may vary between cores, which means cores can run at different maximum frequencies, so it can be used as a core priority and provide a hint to the scheduler in order to place critical tasks on the
higher-priority cores.

### Issue
https://gitee.com/openeuler/kernel/issues/I8WZDQ

### Default config Changes
N/A

### Test
Pass: The test program runs on the target processor core and meets expectations.
 
Link:https://gitee.com/openeuler/kernel/pulls/9157

 

Reviewed-by: default avatarJason Zeng <jason.zeng@intel.com>
Signed-off-by: default avatarZhang Peng <zhangpeng362@huawei.com>
parents 6fa3b3e4 211bd3be
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -122,6 +122,7 @@ int sched_set_itmt_support(void)

	return 0;
}
EXPORT_SYMBOL_GPL(sched_set_itmt_support);

/**
 * sched_clear_itmt_support() - Revoke platform's support of ITMT
@@ -181,3 +182,4 @@ void sched_set_itmt_core_prio(int prio, int cpu)
{
	per_cpu(sched_core_priority, cpu) = prio;
}
EXPORT_SYMBOL_GPL(sched_set_itmt_core_prio);
+68 −13
Original line number Diff line number Diff line
@@ -628,28 +628,35 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
#endif

#ifdef CONFIG_ACPI_CPPC_LIB
static u64 get_max_boost_ratio(unsigned int cpu)
static bool cppc_highest_perf_diff;
static struct cpumask core_prior_mask;

static void cppc_get_highest_nominal_perf(int cpu, u64 *highest_perf, u64 *nominal_perf)
{
	struct cppc_perf_caps perf_caps;
	u64 highest_perf, nominal_perf;
	int ret;

	if (acpi_pstate_strict)
		return 0;

	ret = cppc_get_perf_caps(cpu, &perf_caps);
	if (ret) {
		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
			 cpu, ret);
		return 0;
		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n", cpu, ret);
		return;
	}

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		highest_perf = amd_get_highest_perf();
		*highest_perf = amd_get_highest_perf();
	else
		highest_perf = perf_caps.highest_perf;
		*highest_perf = perf_caps.highest_perf;

	*nominal_perf = perf_caps.nominal_perf;
}

/*
 * get_max_boost_ratio - ratio of highest to nominal CPPC perf for @cpu,
 * as a SCHED_CAPACITY_SHIFT fixed-point value. Returns 0 when boost
 * information is unavailable or acpi_pstate_strict is set.
 */
static u64 get_max_boost_ratio(unsigned int cpu)
{
	u64 highest_perf, nominal_perf;

	/*
	 * NOTE(review): the next line appears to be residue from the
	 * pre-patch version of this function left in by the stripped diff —
	 * "perf_caps" is no longer declared in this scope. Confirm against
	 * the applied source; it should not exist in the new version.
	 */
	nominal_perf = perf_caps.nominal_perf;
	if (acpi_pstate_strict)
		return 0;

	/* Both outputs are expected to be non-zero on success. */
	cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf);

	if (!highest_perf || !nominal_perf) {
		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
/*
 * NOTE(review): the diff hunk boundary below hides unchanged middle lines
 * of this function (likely the error return and a sanity check) — do not
 * treat the lines above and below as contiguous.
 */
@@ -663,8 +670,51 @@ static u64 get_max_boost_ratio(unsigned int cpu)

	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}

/*
 * The work item is needed to avoid CPU hotplug locking issues:
 * cpufreq_set_itmt_prio() can run during CPU online with the hotplug
 * locks held, so sched_set_itmt_support() is deferred to process
 * context via this work item instead of being called directly.
 */
static void cpufreq_sched_itmt_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, cpufreq_sched_itmt_work_fn);

/*
 * cpufreq_set_itmt_prio - use the CPPC highest perf of @cpu as its ITMT
 * core priority, and enable ITMT support once every online CPU has been
 * assigned a priority and at least two of them differ.
 *
 * Initializing the locals to 0 guards against reading indeterminate
 * values when the CPPC query fails (the original left them
 * uninitialized on that path).
 */
static void cpufreq_set_itmt_prio(int cpu)
{
	u64 highest_perf = 0, nominal_perf = 0;
	/* Spread of highest perf seen across CPUs so far. */
	static u64 max_highest_perf = 0, min_highest_perf = U64_MAX;

	cppc_get_highest_nominal_perf(cpu, &highest_perf, &nominal_perf);

	sched_set_itmt_core_prio(highest_perf, cpu);
	cpumask_set_cpu(cpu, &core_prior_mask);

	/* Once a difference is detected the min/max tracking can stop. */
	if (max_highest_perf <= min_highest_perf) {
		if (highest_perf > max_highest_perf)
			max_highest_perf = highest_perf;

		if (highest_perf < min_highest_perf)
			min_highest_perf = highest_perf;

		if (max_highest_perf > min_highest_perf) {
			/*
			 * This code can be run during CPU online under the
			 * CPU hotplug locks, so sched_set_itmt_support()
			 * cannot be called from here.  Queue up a work item
			 * to invoke it.
			 */
			cppc_highest_perf_diff = true;
		}
	}

	/* All online CPUs prioritized and priorities differ: enable ITMT. */
	if (cppc_highest_perf_diff && cpumask_equal(&core_prior_mask, cpu_online_mask)) {
		pr_debug("queue a work to set itmt enabled\n");
		schedule_work(&sched_itmt_work);
	}
}
#else
/* CONFIG_ACPI_CPPC_LIB disabled: no boost ratio, ITMT prio is a no-op. */
static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
static inline void cpufreq_set_itmt_prio(int cpu) { }
#endif

static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -677,7 +727,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
	unsigned int valid_states = 0;
	unsigned int result = 0;
	u64 max_boost_ratio;
	unsigned int i;
	unsigned int i, j;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif
@@ -741,6 +791,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
		pr_info_once("overriding BIOS provided _PSD data\n");
	}
#endif
	if (c->x86_vendor == X86_VENDOR_CENTAUR || c->x86_vendor == X86_VENDOR_ZHAOXIN) {
		for_each_cpu(j, policy->cpus) {
			cpufreq_set_itmt_prio(j);
		}
	}

	/* capability check */
	if (perf->state_count <= 1) {
+11 −0
Original line number Diff line number Diff line
@@ -2707,6 +2707,17 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
		}
	}

#if IS_ENABLED(CONFIG_X86)
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR ||
	     boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN)	&&
	    (boot_cpu_data.x86 == 7 && boot_cpu_data.x86_model == 0x5b)) {
		for_each_cpu(i, cpu_map) {
			for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent)
				sd->flags |= SD_ASYM_PACKING;
		}
	}
#endif

	/* Calculate CPU capacity for physical packages and nodes */
	for (i = nr_cpumask_bits-1; i >= 0; i--) {
		if (!cpumask_test_cpu(i, cpu_map))