Commit 6f06e093 authored by Cheng Yu's avatar Cheng Yu
Browse files

sched/ebpf: Support programmable task selection

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IB6NEH


CVE: NA

--------------------------------

Add hooks in pick_next_task_fair() so that users can customize the
task-selection strategy, and provide some useful kfuncs for use by
those hook programs.

Signed-off-by: default avatarCheng Yu <serein.chengyu@huawei.com>
parent ace17527
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -2,3 +2,7 @@
BPF_SCHED_HOOK(int, -1, cfs_select_rq, struct sched_migrate_ctx *ctx)
BPF_SCHED_HOOK(int, -1, cfs_can_migrate_task, struct task_struct *p,
			struct sched_migrate_node *migrate_node)
/* Called from entity_eligible(); returning 1 forces @se to be reported eligible. */
BPF_SCHED_HOOK(int, -1, cfs_tag_entity_eligible, struct sched_entity *se)
/* Called from entity_before(curr, next); returning 1 makes @curr order before @next. */
BPF_SCHED_HOOK(int, -1, cfs_tag_pick_next_entity,
			const struct sched_entity *curr,
			const struct sched_entity *next)
+53 −3
Original line number Diff line number Diff line
@@ -184,6 +184,46 @@ BTF_ID_LIST(cpustats_dtor_ids)
BTF_ID(struct, bpf_sched_cpu_stats)
BTF_ID(func, bpf_sched_cpustats_release)

/*
 * bpf_sched_entity_is_task - kfunc: tell whether @se represents a task.
 *
 * Returns the result of entity_is_task(@se) (non-zero for a task entity),
 * or -EINVAL when @se is NULL.
 */
__bpf_kfunc int bpf_sched_entity_is_task(struct sched_entity *se)
{
	return se ? entity_is_task(se) : -EINVAL;
}

/*
 * bpf_sched_entity_to_task - kfunc: convert @se to its task_struct.
 *
 * Returns the owning task when @se is a task entity, otherwise NULL
 * (including when @se itself is NULL).
 */
__bpf_kfunc struct task_struct *bpf_sched_entity_to_task(struct sched_entity *se)
{
	if (!se)
		return NULL;

	if (!entity_is_task(se))
		return NULL;

	return task_of(se);
}

/*
 * bpf_sched_tag_of_entity - kfunc: read the scheduling tag of @se.
 *
 * For a task entity the task's own tag is returned; for a group entity
 * the tag of the owning task group is returned. Returns -EINVAL when
 * @se is NULL.
 */
__bpf_kfunc long bpf_sched_tag_of_entity(struct sched_entity *se)
{
	if (!se)
		return -EINVAL;

	return entity_is_task(se) ? task_of(se)->tag
				  : group_cfs_rq(se)->tg->tag;
}

/* BTF ID set naming the sched_entity kfuncs exported to BPF programs. */
BTF_SET8_START(sched_entity_kfunc_btf_ids)
BTF_ID_FLAGS(func, bpf_sched_entity_is_task)
BTF_ID_FLAGS(func, bpf_sched_entity_to_task)
BTF_ID_FLAGS(func, bpf_sched_tag_of_entity)
BTF_SET8_END(sched_entity_kfunc_btf_ids)

/* kfunc set registered below for BPF_PROG_TYPE_SCHED programs. */
static const struct btf_kfunc_id_set sched_entity_kfunc_set = {
	.owner		= THIS_MODULE,
	.set		= &sched_entity_kfunc_btf_ids,
};

/*
 * NOTE(review): this list is declared with no BTF_ID() entries, yet
 * bpf_kfunc_init() reads sched_entity_dtor_ids[0] and [1] — compare
 * with cpustats_dtor_ids above, which lists a struct and a release
 * func. Confirm the dtor entries were not dropped, or drop the dtor
 * registration entirely.
 */
BTF_ID_LIST(sched_entity_dtor_ids)

/*
 * bpf_kfunc_init - late initcall registering the scheduler BPF kfuncs.
 *
 * Sets up the cpustats memory allocator, registers the cpustats kfunc
 * set for SYSCALL and SCHED program types plus its dtor kfuncs, then
 * registers the sched_entity kfunc set for SCHED programs and its dtor
 * kfuncs. Each step is skipped once a prior step has failed (?: chain).
 */
static int __init bpf_kfunc_init(void)
{
	int ret;
@@ -193,12 +233,22 @@ static int __init bpf_kfunc_init(void)
			.kfunc_btf_id = cpustats_dtor_ids[1]
		},
	};
	/*
	 * NOTE(review): sched_entity_dtor_ids has no visible BTF_ID()
	 * entries, so indexing [0]/[1] here looks wrong — confirm.
	 */
	const struct btf_id_dtor_kfunc sched_entity_dtors[] = {
		{
			.btf_id	      = sched_entity_dtor_ids[0],
			.kfunc_btf_id = sched_entity_dtor_ids[1]
		},
	};

	ret = bpf_mem_alloc_init(&bpf_cpustats_ma, sizeof(struct bpf_sched_cpu_stats), false);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &cpustats_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED, &cpustats_kfunc_set);
	/* NOTE(review): the next line is the pre-patch removed line left
	 * visible by the diff renderer; only the `ret = ...` form below
	 * is in the patched file. */
	return ret ?: register_btf_id_dtor_kfuncs(cpustats_dtors,
	ret = ret ?: register_btf_id_dtor_kfuncs(cpustats_dtors,
						ARRAY_SIZE(cpustats_dtors),
						THIS_MODULE);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED, &sched_entity_kfunc_set);
	return ret ?: register_btf_id_dtor_kfuncs(sched_entity_dtors,
						ARRAY_SIZE(sched_entity_dtors),
						THIS_MODULE);
}
late_initcall(bpf_kfunc_init);
+14 −0
Original line number Diff line number Diff line
@@ -708,6 +708,13 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
static inline bool entity_before(const struct sched_entity *a,
				 const struct sched_entity *b)
{
#ifdef CONFIG_BPF_SCHED
	if (bpf_sched_enabled()) {
		if (bpf_sched_cfs_tag_pick_next_entity(a, b) == 1)
			return true;
	}
#endif

	/*
	 * Tiebreak on vruntime seems unnecessary since it can
	 * hardly happen.
@@ -905,6 +912,13 @@ static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)

/*
 * entity_eligible - decide whether @se may currently be picked on @cfs_rq.
 *
 * A BPF scheduler program, when enabled, may tag @se as eligible
 * (hook returning 1); otherwise fall back to the vruntime-based
 * eligibility test.
 */
int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_BPF_SCHED
	if (bpf_sched_enabled() &&
	    bpf_sched_cfs_tag_entity_eligible(se) == 1)
		return 1;
#endif

	return vruntime_eligible(cfs_rq, se->vruntime);
}