Unverified commit 2daf2dcd, authored by openeuler-ci-bot, committed via Gitee
Browse files

!14059 cgroup/bpf: use a dedicated workqueue for cgroup bpf destruction

Merge Pull Request from: @ci-robot 
 
PR sync from: Chen Ridong <chenridong@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/XXT4Q4LF7EKK6O6T6IYBC4EJIU47UB2D/ 
cgroup/bpf: use a dedicated workqueue for cgroup bpf destruction

Chen Ridong (2):
  Revert "cgroup: Fix AA deadlock caused by cgroup_bpf_release"
  cgroup/bpf: use a dedicated workqueue for cgroup bpf destruction


-- 
2.34.1
 
https://gitee.com/openeuler/kernel/issues/IA4VMT
https://gitee.com/src-openeuler/kernel/issues/IB5KQK 
 
Link: https://gitee.com/openeuler/kernel/pulls/14059

 

Reviewed-by: Zhang Peng <zhangpeng362@huawei.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents 1f04362c 5027dbc6
Loading
Loading
Loading
Loading
+18 −1
Original line number Diff line number Diff line
@@ -24,6 +24,23 @@
DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

/*
 * cgroup bpf destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions.  Use a separate workqueue so that cgroup bpf
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_bpf_destroy_wq;

/*
 * Boot-time setup for the dedicated cgroup-bpf destruction workqueue.
 * Allocated with max_active = 1, so destruction work items are serialized
 * on this queue instead of competing for system_wq slots (see the comment
 * above cgroup_bpf_destroy_wq). Failure to allocate is fatal: the kernel
 * cannot release cgroup bpf state without it, hence panic() rather than
 * an error return.
 */
static int __init cgroup_bpf_wq_init(void)
{
	cgroup_bpf_destroy_wq = alloc_workqueue("cgroup_bpf_destroy", 0, 1);
	if (!cgroup_bpf_destroy_wq)
		panic("Failed to alloc workqueue for cgroup bpf destroy.\n");
	return 0;
}
/* Run at the core initcall level, early enough that the queue exists
 * before any cgroup with attached bpf programs can be destroyed. */
core_initcall(cgroup_bpf_wq_init);

/* __always_inline is necessary to prevent indirect call through run_prog
 * function pointer.
 */
@@ -334,7 +351,7 @@ static void cgroup_bpf_release_fn(struct percpu_ref *ref)
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(cgroup_destroy_wq, &cgrp->bpf.release_work);
	queue_work(cgroup_bpf_destroy_wq, &cgrp->bpf.release_work);
}

/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
+0 −1
Original line number Diff line number Diff line
@@ -13,7 +13,6 @@
extern spinlock_t trace_cgroup_path_lock;
extern char trace_cgroup_path[TRACE_CGROUP_PATH_LEN];
extern void __init enable_debug_cgroup(void);
extern struct workqueue_struct *cgroup_destroy_wq;

/*
 * cgroup_path() takes a spin lock. It is good practice not to take
+1 −1
Original line number Diff line number Diff line
@@ -126,7 +126,7 @@ DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
struct workqueue_struct *cgroup_destroy_wq;
static struct workqueue_struct *cgroup_destroy_wq;

/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,