Commit 47b10b73 authored by Viresh Kumar's avatar Viresh Kumar Committed by Will Deacon
Browse files

arm64: topology: Reorder init_amu_fie() a bit



This patch does a couple of optimizations in init_amu_fie(), such as
exiting early from paths where we don't need to continue any further,
avoiding the enable/disable dance, moving the calls to
topology_scale_freq_invariant() to just where we need them instead of
at the top of the routine, and avoiding calling it a third time.

Reviewed-by: Ionela Voinescu <ionela.voinescu@arm.com>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Tested-by: Ionela Voinescu <ionela.voinescu@arm.com>
Link: https://lore.kernel.org/r/a732e71ab9ec28c354eb28dd898c9b47d490863f.1610104461.git.viresh.kumar@linaro.org


Signed-off-by: Will Deacon <will@kernel.org>
parent 384e5699
Loading
Loading
Loading
Loading
+14 −13
Original line number Diff line number Diff line
@@ -221,8 +221,8 @@ static DEFINE_STATIC_KEY_FALSE(amu_fie_key);

static int __init init_amu_fie(void)
{
	bool invariance_status = topology_scale_freq_invariant();
	cpumask_var_t valid_cpus;
	bool invariant;
	int ret = 0;
	int cpu;

@@ -249,18 +249,19 @@ static int __init init_amu_fie(void)
	if (cpumask_equal(valid_cpus, cpu_present_mask))
		cpumask_copy(amu_fie_cpus, cpu_present_mask);

	if (!cpumask_empty(amu_fie_cpus)) {
		pr_info("CPUs[%*pbl]: counters will be used for FIE.",
			cpumask_pr_args(amu_fie_cpus));
	if (cpumask_empty(amu_fie_cpus))
		goto free_valid_mask;

	invariant = topology_scale_freq_invariant();

	/* We aren't fully invariant yet */
	if (!invariant && !cpumask_equal(amu_fie_cpus, cpu_present_mask))
		goto free_valid_mask;

	static_branch_enable(&amu_fie_key);
	}

	/*
	 * If the system is not fully invariant after AMU init, disable
	 * partial use of counters for frequency invariance.
	 */
	if (!topology_scale_freq_invariant())
		static_branch_disable(&amu_fie_key);
	pr_info("CPUs[%*pbl]: counters will be used for FIE.",
		cpumask_pr_args(amu_fie_cpus));

	/*
	 * Task scheduler behavior depends on frequency invariance support,
@@ -268,7 +269,7 @@ static int __init init_amu_fie(void)
	 * a result of counter initialisation and use, retrigger the build of
	 * scheduling domains to ensure the information is propagated properly.
	 */
	if (invariance_status != topology_scale_freq_invariant())
	if (!invariant)
		rebuild_sched_domains_energy();

free_valid_mask: