Commit 05ae6865 authored by Alexei Starovoitov, committed by Daniel Borkmann

bpf: Refactor alloc_bulk().

Factor out the inner body of alloc_bulk() into a separate helper.
No functional changes.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20230706033447.54696-5-alexei.starovoitov@gmail.com
parent 9de3e815
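
For readers of the diff below: the block being factored out protects the per-cpu free_llist not with a lock but with a local 'active' counter, incremented around the list update and checked for exclusivity; on PREEMPT_RT, interrupts are additionally disabled because irq_work runs in a per-cpu kthread there. A minimal user-space sketch of that guard pattern, with hypothetical names (guarded_list_update() is illustrative only; the kernel uses local_t and WARN_ON_ONCE(), not C11 atomics and assert()):

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical user-space model of the guard in add_obj_to_free_list():
 * a per-cpu counter that must read exactly 1 inside the critical
 * section. Any other value means a reentrant (NMI-like) path got in,
 * which the kernel flags with WARN_ON_ONCE() instead of assert().
 */
static _Atomic int active;

static void guarded_list_update(void)
{
	/* models WARN_ON_ONCE(local_inc_return(&c->active) != 1) */
	assert(atomic_fetch_add(&active, 1) + 1 == 1);
	/* ... mutate the per-cpu free list here ... */
	atomic_fetch_sub(&active, 1);	/* models local_dec(&c->active) */
}

int main(void)
{
	guarded_list_update();
	puts("critical section entered exactly once");
	return 0;
}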
kernel/bpf/memalloc.c: +26 −20

--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -154,11 +154,35 @@ static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
 #endif
 }
 
+static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
+{
+	unsigned long flags;
+
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		/* In RT irq_work runs in per-cpu kthread, so disable
+		 * interrupts to avoid preemption and interrupts and
+		 * reduce the chance of bpf prog executing on this cpu
+		 * when active counter is busy.
+		 */
+		local_irq_save(flags);
+	/* alloc_bulk runs from irq_work which will not preempt a bpf
+	 * program that does unit_alloc/unit_free since IRQs are
+	 * disabled there. There is no race to increment 'active'
+	 * counter. It protects free_llist from corruption in case NMI
+	 * bpf prog preempted this loop.
+	 */
+	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
+	__llist_add(obj, &c->free_llist);
+	c->free_cnt++;
+	local_dec(&c->active);
+	if (IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_irq_restore(flags);
+}
+
 /* Mostly runs from irq_work except __init phase. */
 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 {
 	struct mem_cgroup *memcg = NULL, *old_memcg;
-	unsigned long flags;
 	void *obj;
 	int i;
 
@@ -188,25 +212,7 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 			if (!obj)
 				break;
 		}
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			/* In RT irq_work runs in per-cpu kthread, so disable
-			 * interrupts to avoid preemption and interrupts and
-			 * reduce the chance of bpf prog executing on this cpu
-			 * when active counter is busy.
-			 */
-			local_irq_save(flags);
-		/* alloc_bulk runs from irq_work which will not preempt a bpf
-		 * program that does unit_alloc/unit_free since IRQs are
-		 * disabled there. There is no race to increment 'active'
-		 * counter. It protects free_llist from corruption in case NMI
-		 * bpf prog preempted this loop.
-		 */
-		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
-		__llist_add(obj, &c->free_llist);
-		c->free_cnt++;
-		local_dec(&c->active);
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			local_irq_restore(flags);
+		add_obj_to_free_list(c, obj);
 	}
 	set_active_memcg(old_memcg);
 	mem_cgroup_put(memcg);
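
For context beyond the hunks, the refactored alloc_bulk() loop reduces to the following shape. This is a sketch reconstructed only from the diff above, not verbatim kernel source; the memcg setup and the object-allocation steps inside the loop are elided because they are not part of this patch:

/* Sketch, not verbatim kernel source: assembled from the hunks above. */
static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
{
	struct mem_cgroup *memcg = NULL, *old_memcg;
	void *obj;
	int i;

	/* ... memcg/old_memcg setup elided (unchanged by this patch) ... */
	for (i = 0; i < cnt; i++) {
		/* ... obtain obj for this iteration; only the tail of that
		 * logic ("if (!obj) break;") is visible in the second hunk ...
		 */
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);	/* new helper from this patch */
	}
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
}

Moving the guard into add_obj_to_free_list() keeps the per-object protection identical, consistent with the "No functional changes" note in the commit message.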