Unverified Commit 0509d18d authored by openeuler-ci-bot, committed by Gitee
Browse files

!3712 sched: steal tasks to improve CPU utilization

Merge Pull Request from: @ci-robot 
 
PR sync from: Cheng Yu <serein.chengyu@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/E42KMNOMTG6QUHYGBWMRSJZ6Y2N4LHAV/ 
sched: steal tasks to improve CPU utilization

Cheng Jian (3):
  disable stealing by default
  sched/fair: introduce SCHED_STEAL
  config: enable CONFIG_SCHED_STEAL by default

Steve Sistare (10):
  sched: Provide sparsemask, a reduced contention bitmap
  sched/topology: Provide hooks to allocate data shared per LLC
  sched/topology: Provide cfs_overload_cpus bitmap
  sched/fair: Dynamically update cfs_overload_cpus
  sched/fair: Hoist idle_stamp up from idle_balance
  sched/fair: Generalize the detach_task interface
  sched/fair: Provide can_migrate_task_llc
  sched/fair: Steal work from an overloaded CPU when CPU goes idle
  sched/fair: disable stealing if too many NUMA nodes
  sched/fair: Provide idle search schedstats


-- 
2.25.1
 
https://gitee.com/openeuler/kernel/issues/I8PIYZ 
 
Link: https://gitee.com/openeuler/kernel/pulls/3712

 

Reviewed-by: Zucheng Zheng <zhengzucheng@huawei.com>
Reviewed-by: Zhang Jianhua <chris.zjh@huawei.com>
Reviewed-by: Liu Chao <liuchao173@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parents ce4810ad 6863557a
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -193,6 +193,7 @@ CONFIG_IPC_NS=y
CONFIG_USER_NS=y
CONFIG_PID_NS=y
CONFIG_NET_NS=y
CONFIG_SCHED_STEAL=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_RELAY=y
+1 −0
Original line number Diff line number Diff line
@@ -215,6 +215,7 @@ CONFIG_IPC_NS=y
CONFIG_USER_NS=y
CONFIG_PID_NS=y
CONFIG_NET_NS=y
CONFIG_SCHED_STEAL=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_RELAY=y
+3 −0
Original line number Diff line number Diff line
@@ -82,6 +82,9 @@ struct sched_domain_shared {
	atomic_t	nr_busy_cpus;
	int		has_idle_cores;
	int		nr_idle_scan;
#ifdef CONFIG_SCHED_STEAL
	struct sparsemask *cfs_overload_cpus;
#endif
};

struct sched_domain {
+15 −0
Original line number Diff line number Diff line
@@ -1315,6 +1315,21 @@ config NET_NS

endif # NAMESPACES

config SCHED_STEAL
	bool "Steal tasks to improve CPU utilization"
	depends on SMP
	default n
	help
	  When a CPU has no more CFS tasks to run, and idle_balance() fails
	  to find a task, then attempt to steal a task from an overloaded
	  CPU in the same LLC. Maintain and use a bitmap of overloaded CPUs
	  to efficiently identify candidates.  To minimize search time, steal
	  the first migratable task that is found when the bitmap is traversed.
	  For fairness, search for migratable tasks on an overloaded CPU in
	  order of next to run.

	  If unsure, say N here.

config CHECKPOINT_RESTORE
	bool "Checkpoint/restore support"
	depends on PROC_FS
+33 −2
Original line number Diff line number Diff line
@@ -4609,17 +4609,48 @@ static int sysctl_numa_balancing(struct ctl_table *table, int write,

DEFINE_STATIC_KEY_FALSE(sched_schedstats);

#ifdef CONFIG_SCHED_STEAL
unsigned long schedstat_skid;

/*
 * Estimate the "skid" (overhead) of a schedstat timestamp read by timing
 * 100 back-to-back local_clock() calls and averaging the plausible samples.
 * The average is stored in the global schedstat_skid; it is set to 0 when
 * no sample qualified. Units follow local_clock() (presumably nanoseconds
 * — TODO confirm against sched_clock documentation).
 */
static void compute_skid(void)
{
	int i, n = 0;	/* n counts the samples that passed the sanity filter */
	s64 t;
	int skid = 0;	/* running sum of accepted deltas */

	for (i = 0; i < 100; i++) {
		t = local_clock();
		t = local_clock() - t;	/* delta between two consecutive reads */
		if (t > 0 && t < 1000) {	/* only use sane samples */
			skid += (int) t;
			n++;
		}
	}

	/* Average the accepted samples; fall back to 0 if all were rejected. */
	if (n > 0)
		schedstat_skid = skid / n;
	else
		schedstat_skid = 0;
	pr_info("schedstat_skid = %lu\n", schedstat_skid);
}
#else
static inline void compute_skid(void) {}
#endif

static void set_schedstats(bool enabled)
{
	if (enabled)
	if (enabled) {
		compute_skid();
		static_branch_enable(&sched_schedstats);
	else
	} else {
		static_branch_disable(&sched_schedstats);
	}
}

void force_schedstat_enabled(void)
{
	if (!schedstat_enabled()) {
		compute_skid();
		pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
		static_branch_enable(&sched_schedstats);
	}
Loading