Commit 18e027b1 authored by Alexei Starovoitov, committed by Daniel Borkmann

bpf: Factor out inc/dec of active flag into helpers.



Factor out local_inc/dec_return(&c->active) into helpers.
No functional changes.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20230706033447.54696-6-alexei.starovoitov@gmail.com
parent 05ae6865
+18 −12
@@ -154,17 +154,15 @@ static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
 #endif
 }
 
-static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
+static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
 {
-	unsigned long flags;
-
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		/* In RT irq_work runs in per-cpu kthread, so disable
 		 * interrupts to avoid preemption and interrupts and
 		 * reduce the chance of bpf prog executing on this cpu
 		 * when active counter is busy.
 		 */
-		local_irq_save(flags);
+		local_irq_save(*flags);
 	/* alloc_bulk runs from irq_work which will not preempt a bpf
 	 * program that does unit_alloc/unit_free since IRQs are
 	 * disabled there. There is no race to increment 'active'
@@ -172,13 +170,25 @@ static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
 	 * bpf prog preempted this loop.
 	 */
 	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
-	__llist_add(obj, &c->free_llist);
-	c->free_cnt++;
+}
+
+static void dec_active(struct bpf_mem_cache *c, unsigned long flags)
+{
 	local_dec(&c->active);
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		local_irq_restore(flags);
 }
 
+static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
+{
+	unsigned long flags;
+
+	inc_active(c, &flags);
+	__llist_add(obj, &c->free_llist);
+	c->free_cnt++;
+	dec_active(c, flags);
+}
+
 /* Mostly runs from irq_work except __init phase. */
 static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 {
@@ -300,17 +310,13 @@ static void free_bulk(struct bpf_mem_cache *c)
 	int cnt;
 
 	do {
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			local_irq_save(flags);
-		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
+		inc_active(c, &flags);
 		llnode = __llist_del_first(&c->free_llist);
 		if (llnode)
			cnt = --c->free_cnt;
 		else
			cnt = 0;
-		local_dec(&c->active);
-		if (IS_ENABLED(CONFIG_PREEMPT_RT))
-			local_irq_restore(flags);
+		dec_active(c, flags);
 		if (llnode)
			enque_to_free(c, llnode);
 	} while (cnt > (c->high_watermark + c->low_watermark) / 2);
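
For readers less familiar with the kernel primitives involved, the sketch below mirrors the shape of this refactoring in plain userspace C: inc_active()/dec_active() bracket a critical section that callers such as add_obj_to_free_list() previously open-coded. This is a minimal illustrative analogue, not the kernel code: struct cache, the assert(), and the fake flags value stand in for bpf_mem_cache, WARN_ON_ONCE(local_inc_return(&c->active) != 1), and local_irq_save()/local_irq_restore() under CONFIG_PREEMPT_RT.

/* Simplified userspace analogue of the inc_active()/dec_active() pattern.
 * Plain ints and a fake "irq flags" value stand in for the kernel's
 * local_t, local_irq_save()/restore() and IS_ENABLED(CONFIG_PREEMPT_RT);
 * the point is only the shape of the helpers and their callers.
 */
#include <assert.h>
#include <stdio.h>

struct cache {
	int active;   /* reentrancy guard, a per-CPU local_t in the kernel */
	int free_cnt; /* objects currently on the free list */
};

/* Enter the critical section: save "irq" state and take the guard. */
static void inc_active(struct cache *c, unsigned long *flags)
{
	*flags = 1;             /* stand-in for local_irq_save(*flags) */
	c->active++;
	assert(c->active == 1); /* stand-in for WARN_ON_ONCE(... != 1) */
}

/* Leave the critical section: drop the guard and restore "irq" state. */
static void dec_active(struct cache *c, unsigned long flags)
{
	c->active--;
	(void)flags;            /* stand-in for local_irq_restore(flags) */
}

/* A caller now brackets its free-list update with the two helpers. */
static void add_obj_to_free_list(struct cache *c)
{
	unsigned long flags;

	inc_active(c, &flags);
	c->free_cnt++;          /* the actual list manipulation goes here */
	dec_active(c, flags);
}

int main(void)
{
	struct cache c = { 0 };

	add_obj_to_free_list(&c);
	printf("free_cnt = %d, active = %d\n", c.free_cnt, c.active);
	return 0;
}

In the kernel code the same bracketing is what lets add_obj_to_free_list() and free_bulk() share a single copy of the PREEMPT_RT interrupt-disable logic instead of each open-coding it.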