Commit 1a553561 authored by Wang ShaoBo's avatar Wang ShaoBo Committed by yanhaitao
Browse files

sched: smart grid: init sched_grid_qos structure on QOS purpose

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8WMOG


CVE: NA

----------------------------------------

As smart grid scheduling (SGS) may shrink resources and affect task QOS,
we provide methods for evaluating task QOS in each divided grid. We mainly
focus on the following two aspects:

   1. Evaluate whether (such as CPU or memory) resources meet our demand
   2. Ensure the least impact when working with (cpufreq and cpuidle)
      governors

To tackle these questions, we have summarized several sampling methods
that obtain tasks' characteristics while at the same time reducing
scheduling noise as much as possible:

  1. We detect the key factors that determine how sensitive a process is
     to cpufreq or cpuidle adjustments, and use them to guide the
     cpufreq/cpuidle governor
  2. We dynamically monitor process memory bandwidth and adjust memory
     allocation to minimize cross-remote memory access
  3. We provide a variety of load tracking mechanisms to adapt to
     different types of task load changes

     ---------------------------------     -----------------
    |            class A              |   |     class B     |
    |    --------        --------     |   |     --------    |
    |   | group0 |      | group1 |    |---|    | group2 |   |
    |    --------        --------     |   |     --------    |
    |    CPU/memory sensitive type    |   |   balance type  |
     ----------------+----------------     ------+-------+--
                     v                           v       | (target cpufreq)
     ----------------------------------------------      | (sensitivity)
    |              Not satisfied with QOS?         |     |
     --------------------------+-------------------      |
                               v                         v
     ----------------------------------------------     ----------------
    |              expand or shrink resource       |<--|  energy model  |
     ------------------------+---------------------     ----------------
                             v                           |
     -----------      -----------      ------------      v
    |           |    |           |    |            |    ---------------
    |   GRID0   +----+   GRID1   +----+   GRID2    |<--|   governor    |
    |           |    |           |    |            |    ---------------
     -----------      -----------      ------------
                   \            |            /
                    \  -------------------  /
                      |  pages migration  |
                       -------------------

We will introduce the energy model in the follow-up implementation, and
consider the dynamic affinity adjustment between each divided grid in the
runtime.

Signed-off-by: default avatarWang ShaoBo <bobo.shaobowang@huawei.com>
Reviewed-by: default avatarKefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: default avatarXie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: default avatarZhang Changzhong <zhangchangzhong@huawei.com>
Signed-off-by: default avatarYipeng Zou <zouyipeng@huawei.com>
parent 6eb07f99
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -1559,6 +1559,11 @@ struct task_struct {
#ifdef CONFIG_PSI_FINE_GRAINED
	int memstall_type;
#endif

#if defined(CONFIG_QOS_SCHED_SMART_GRID) && !defined(__GENKSYMS__)
		struct sched_grid_qos *grid_qos;
#endif

	/*
	 * New fields for task_struct should be added above here, so that
	 * they are included in the randomized portion of task_struct.
+104 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_GRID_QOS_H
#define _LINUX_SCHED_GRID_QOS_H
#include <linux/nodemask.h>
#include <linux/sched.h>

#ifdef CONFIG_QOS_SCHED_SMART_GRID
/*
 * Per-task smart-grid QOS classification level.
 *
 * Eight ordered levels (LEVEL_1 .. LEVEL_8) evaluated by
 * sched_grid_qos_stat::set_class_lvl.  SCHED_GRID_QOS_CLASS_LEVEL_NR
 * is the number of levels, not a valid level itself.
 */
enum sched_grid_qos_class {
	SCHED_GRID_QOS_CLASS_LEVEL_1 = 0,
	SCHED_GRID_QOS_CLASS_LEVEL_2 = 1,
	SCHED_GRID_QOS_CLASS_LEVEL_3 = 2,
	SCHED_GRID_QOS_CLASS_LEVEL_4 = 3,
	SCHED_GRID_QOS_CLASS_LEVEL_5 = 4,
	SCHED_GRID_QOS_CLASS_LEVEL_6 = 5,
	SCHED_GRID_QOS_CLASS_LEVEL_7 = 6,
	SCHED_GRID_QOS_CLASS_LEVEL_8 = 7,
	SCHED_GRID_QOS_CLASS_LEVEL_NR
};

/*
 * Indices into sched_grid_qos_stat::sample[], one slot per sampled
 * metric.  IPS presumably stands for instructions per second — TODO
 * confirm against the stat.c implementation.
 * SCHED_GRID_QOS_SAMPLE_NR is the array length.
 */
enum {
	SCHED_GRID_QOS_IPS_INDEX = 0,
	SCHED_GRID_QOS_MEMBOUND_RATIO_INDEX = 1,
	SCHED_GRID_QOS_MEMBANDWIDTH_INDEX = 2,
	SCHED_GRID_QOS_SAMPLE_NR
};

#define SCHED_GRID_QOS_RING_BUFFER_MAXLEN 100

/*
 * Fixed-capacity ring buffer holding raw u64 samples for one metric.
 *
 * @vecs: sample storage, SCHED_GRID_QOS_RING_BUFFER_MAXLEN entries.
 * @head: write position; presumably the index of the next slot to be
 *        overwritten — confirm against the push implementation.
 * @push: hook that stores @stepsize values from @data into @ring_buffer.
 */
struct sched_grid_qos_ring_buffer {
	u64 vecs[SCHED_GRID_QOS_RING_BUFFER_MAXLEN];
	unsigned int head;
	void (*push)(u64 *data, int stepsize,
		struct sched_grid_qos_ring_buffer *ring_buffer);
};

/*
 * One sampled QOS metric (see the sample-index enum above).
 *
 * @name:          human-readable metric name.
 * @index:         slot in sched_grid_qos_stat::sample[].
 * @sample_bypass: non-zero skips sampling for this metric —
 *                 NOTE(review): inferred from the name, confirm.
 * @sample_times:  number of samples taken so far (presumed counter).
 * @ring_buffer:   history of raw samples.
 * @pred_target:   per-NUMA-node predicted target value for this metric.
 * @cal_target:    computes the prediction from @ring_buffer contents.
 * @account_ready: set when accounting data is valid for consumption.
 * @start/@account: begin / accumulate one sampling round; return 0 on
 *                 success by kernel convention — confirm in stat.c.
 */
struct sched_grid_qos_sample {
	const char *name;
	int index;
	int sample_bypass;
	int sample_times;
	struct sched_grid_qos_ring_buffer ring_buffer;
	u64 pred_target[MAX_NUMNODES];
	void (*cal_target)(int stepsize,
		struct sched_grid_qos_ring_buffer *ring_buffer);

	int account_ready;
	int (*start)(void *arg);
	int (*account)(void *arg);
};

/*
 * Aggregated QOS statistics for one task: the current class level,
 * the hook that (re)evaluates it, and one sample slot per metric.
 */
struct sched_grid_qos_stat {
	enum sched_grid_qos_class class_lvl;
	int (*set_class_lvl)(struct sched_grid_qos_stat *qos_stat);
	struct sched_grid_qos_sample sample[SCHED_GRID_QOS_SAMPLE_NR];
};

/*
 * Power-related QOS inputs used to guide the cpufreq/cpuidle governors
 * (see commit message).  Field semantics inferred from names:
 * @cpufreq_sense_ratio: how sensitive the task is to frequency changes;
 * @target_cpufreq:      desired CPU frequency for this task;
 * @cstate_sense_ratio:  sensitivity to C-state (cpuidle) depth.
 * TODO(review): confirm units/ranges against power.c.
 */
struct sched_grid_qos_power {
	int cpufreq_sense_ratio;
	int target_cpufreq;
	int cstate_sense_ratio;
};

/*
 * Placement preferences for a grid task: the NUMA nodes preferred for
 * memory allocation and the CPUs the task prefers to run on.
 */
struct sched_grid_qos_affinity {
	nodemask_t mem_preferred_node_mask;
	const struct cpumask *prefer_cpus;
};

struct task_struct;
/*
 * Per-task smart-grid QOS state, hung off task_struct::grid_qos.
 * Groups the sampled statistics, power hints and placement preferences,
 * plus the hook that applies the affinity policy to the task.
 */
struct sched_grid_qos {
	struct sched_grid_qos_stat stat;
	struct sched_grid_qos_power power;
	struct sched_grid_qos_affinity affinity;

	int (*affinity_set)(struct task_struct *p);
};

/*
 * Apply @p's grid affinity policy via its per-task QOS hook.
 * NOTE(review): assumes p->grid_qos and its affinity_set callback are
 * always valid once CONFIG_QOS_SCHED_SMART_GRID is on — no NULL check;
 * confirm that sched_grid_qos_fork() guarantees this for every task.
 */
static inline int sched_qos_affinity_set(struct task_struct *p)
{
	return p->grid_qos->affinity_set(p);
}

int sched_grid_qos_fork(struct task_struct *p, struct task_struct *orig);
void sched_grid_qos_free(struct task_struct *p);

int sched_grid_preferred_interleave_nid(struct mempolicy *policy);
int sched_grid_preferred_nid(int preferred_nid, nodemask_t *nodemask);
#else
/*
 * !CONFIG_QOS_SCHED_SMART_GRID stub: no grid preference, let the
 * mempolicy interleave logic pick a node (NUMA_NO_NODE = no opinion).
 */
static inline int
sched_grid_preferred_interleave_nid(struct mempolicy *policy)
{
	return NUMA_NO_NODE;
}
/*
 * !CONFIG_QOS_SCHED_SMART_GRID stub: pass the caller's preferred node
 * through unchanged.
 */
static inline int
sched_grid_preferred_nid(int preferred_nid, nodemask_t *nodemask)
{
	return preferred_nid;
}

/* !CONFIG_QOS_SCHED_SMART_GRID stub: nothing to apply, report success. */
static inline int sched_qos_affinity_set(struct task_struct *p)
{
	return 0;
}
#endif
#endif
+12 −0
Original line number Diff line number Diff line
@@ -99,6 +99,9 @@
#include <linux/stackprotector.h>
#include <linux/user_events.h>
#include <linux/iommu.h>
#ifdef CONFIG_QOS_SCHED_SMART_GRID
#include <linux/sched/grid_qos.h>
#endif
#include <linux/share_pool.h>

#include <asm/pgalloc.h>
@@ -628,6 +631,9 @@ void free_task(struct task_struct *tsk)
#ifdef CONFIG_QOS_SCHED_DYNAMIC_AFFINITY
	if (dynamic_affinity_enabled())
		sched_prefer_cpus_free(tsk);
#endif
#ifdef CONFIG_QOS_SCHED_SMART_GRID
	sched_grid_qos_free(tsk);
#endif
	free_task_struct(tsk);
}
@@ -2389,6 +2395,12 @@ __latent_entropy struct task_struct *copy_process(
	}
	current->flags &= ~PF_NPROC_EXCEEDED;

#ifdef CONFIG_QOS_SCHED_SMART_GRID
	retval = sched_grid_qos_fork(p, current);
	if (retval)
		goto bad_fork_cleanup_count;
#endif

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
+1 −0
Original line number Diff line number Diff line
@@ -32,3 +32,4 @@ obj-y += core.o
obj-y += fair.o
obj-y += build_policy.o
obj-y += build_utility.o
obj-$(CONFIG_QOS_SCHED_SMART_GRID) += grid/
+2 −0
Original line number Diff line number Diff line
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_QOS_SCHED_SMART_GRID)  += qos.o power.o stat.o
Loading