Commit 8ce3e706 authored by Tim Chen, committed by Jie Liu

scheduler: Add runtime knob sysctl_sched_cluster

kunpeng inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5W44S
CVE: NA

Reference: https://lore.kernel.org/lkml/cover.1638563225.git.tim.c.chen@linux.intel.com/



----------------------------------------------------------------------

Allow runtime configuration of the scheduler to use cluster
scheduling.  Configuration can be changed via the sysctl variable
/proc/sys/kernel/sched_cluster.  Setting it to 1 enables cluster
scheduling and setting it to 0 turns it off.
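
For illustration, here is a minimal sketch of how such a knob is typically
wired up on the kernel side (the table name, the default value and the
handler body below are assumptions for illustration only; the actual
definitions live in kernel/sched/ hunks that are not part of this excerpt).
From userspace the knob is toggled with e.g. "echo 0 >
/proc/sys/kernel/sched_cluster".

#ifdef CONFIG_SCHED_CLUSTER
/* Assumed default: cluster scheduling enabled until turned off via sysctl. */
unsigned int sysctl_sched_cluster = 1;

/*
 * Illustrative handler: parse and bound the value, and if a write actually
 * changed it, update the topology levels and rebuild the sched domains so
 * the cluster level is added or dropped.  The real handler would also need
 * to serialize concurrent writers.
 */
int sched_cluster_handler(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int oldval = sysctl_sched_cluster;
	int ret;

	ret = proc_douintvec_minmax(table, write, buffer, lenp, ppos);
	if (!ret && write && oldval != sysctl_sched_cluster) {
		set_sched_cluster();		/* toggle the CLS topology level */
		arch_rebuild_cpu_topology();	/* rebuild the sched domains */
	}

	return ret;
}

/* Hypothetical sysctl table entry exposing /proc/sys/kernel/sched_cluster. */
static struct ctl_table sched_cluster_sysctls[] = {
	{
		.procname	= "sched_cluster",
		.data		= &sysctl_sched_cluster,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sched_cluster_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};
#endif /* CONFIG_SCHED_CLUSTER */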

Cluster scheduling should benefit independent tasks by load balancing
them between clusters.  It reaps the most benefit when the system's CPUs
are not fully busy, so we can spread the tasks out between the clusters to
reduce contention on cluster resources (e.g. the L2 cache).

However, if the system is expected to operate close to full utilization,
the system admin could turn this feature off so as not to incur
extra load balancing overhead between the cluster domains.

Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Jie Liu <liujie375@h-partners.com>
parent 211b6fb7
+8 −0
@@ -56,6 +56,7 @@
 #include <linux/numa.h>
 #include <linux/pgtable.h>
 #include <linux/overflow.h>
+#include <linux/cpuset.h>
 
 #include <asm/acpi.h>
 #include <asm/desc.h>
@@ -122,6 +123,13 @@ int arch_update_cpu_topology(void)
 	return retval;
 }
 
+void arch_rebuild_cpu_topology(void)
+{
+	x86_topology_update = true;
+	rebuild_sched_domains();
+	x86_topology_update = false;
+}
+
 static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
 {
 	unsigned long flags;
+9 −4
@@ -114,16 +114,21 @@ int topology_update_cpu_topology(void)
 	return update_topology;
 }
 
+void __weak arch_rebuild_cpu_topology(void)
+{
+	update_topology = 1;
+	rebuild_sched_domains();
+	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
+	update_topology = 0;
+}
+
 /*
  * Updating the sched_domains can't be done directly from cpufreq callbacks
  * due to locking, so queue the work for later.
  */
 static void update_topology_flags_workfn(struct work_struct *work)
 {
-	update_topology = 1;
-	rebuild_sched_domains();
-	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
-	update_topology = 0;
+	arch_rebuild_cpu_topology();
 }
 
 static DEFINE_PER_CPU(u32, freq_factor) = 1;
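
The context comment above refers to the usual "defer the rebuild to a
workqueue" pattern.  A short sketch of that pattern follows; the notifier
name and body are invented for illustration, and only
update_topology_flags_workfn() and freq_factor come from the hunk above.

/* Work item that performs the rebuild outside the notifier's locking context. */
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static int topology_freq_notifier(struct notifier_block *nb,
				  unsigned long event, void *data)
{
	/* ... update the per-CPU freq_factor from the cpufreq policy ... */

	/* Defer rebuild_sched_domains() instead of calling it here. */
	schedule_work(&update_topology_flags_work);
	return NOTIFY_OK;
}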
+6 −0
@@ -111,4 +111,10 @@ int sched_energy_aware_handler(struct ctl_table *table, int write,
 		void *buffer, size_t *lenp, loff_t *ppos);
 #endif
 
+#ifdef CONFIG_SCHED_CLUSTER
+extern unsigned int sysctl_sched_cluster;
+int sched_cluster_handler(struct ctl_table *table, int write,
+			  void *buffer, size_t *lenp, loff_t *ppos);
+#endif
+
 #endif /* _LINUX_SCHED_SYSCTL_H */
+1 −0
@@ -44,6 +44,7 @@
 		if (nr_cpus_node(node))
 
 int arch_update_cpu_topology(void);
+void arch_rebuild_cpu_topology(void);
 
 /* Conform to ACPI 2.0 SLIT distance definitions */
 #define LOCAL_DISTANCE		10
+1 −0
@@ -8097,6 +8097,7 @@ int sched_cpu_dying(unsigned int cpu)
 void __init sched_init_smp(void)
 {
 	sched_init_numa();
+	set_sched_cluster();
 
 	/*
 	 * There's no userspace yet to cause hotplug operations; hence all the
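
The hunk above hooks set_sched_cluster() into sched_init_smp(); its
definition is not part of this excerpt.  One plausible shape, assuming the
SDTL_SKIP-style "skip this topology level" mechanism from the referenced
series (the flag and the exact walk are assumptions, not taken from the
hunks shown here):

/* Skip or restore the cluster (CLS) topology level based on the knob. */
void set_sched_cluster(void)
{
	struct sched_domain_topology_level *tl;

	for (tl = sched_domain_topology; tl->mask; tl++) {
		if (tl->sd_flags && (tl->sd_flags() & SD_CLUSTER)) {
			if (!sysctl_sched_cluster)
				tl->flags |= SDTL_SKIP;	/* drop CLS on next rebuild */
			else
				tl->flags &= ~SDTL_SKIP;
			break;
		}
	}
}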