Commit 7c266178 authored by Alexei Starovoitov, committed by Daniel Borkmann

bpf: Adjust low/high watermarks in bpf_mem_cache



The same low/high watermarks for every bucket in bpf_mem_cache consume a
significant amount of memory. Preallocating 64 elements of 4096 bytes each in
the free list is not efficient. Make the low/high watermarks and the batching
value dependent on element size. This change brings significant memory savings.
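
As a rough illustration of where the savings come from, here is a small
userspace sketch (not part of the patch; the size list mirrors the
allocator's buckets) comparing the average per-cpu freelist footprint of
the old fixed watermarks against the size-scaled ones:

	#include <stdio.h>

	#define max(a, b) ((a) > (b) ? (a) : (b))

	int main(void)
	{
		/* the 11 bucket sizes used by bpf_mem_alloc */
		int sizes[] = {16, 32, 64, 96, 128, 196, 256,
			       512, 1024, 2048, 4096};
		long old_total = 0, new_total = 0;

		for (int i = 0; i < 11; i++) {
			int sz = sizes[i], low, high;

			/* old scheme: fixed 32/96 marks, ~64 elements on average */
			old_total += 64L * sz;

			/* new scheme: marks shrink for elements above 256 bytes */
			if (sz <= 256) {
				low = 32;
				high = 96;
			} else {
				low = max(32 * 256 / sz, 1);
				high = max(96 * 256 / sz, 3);
			}
			new_total += (long)(low + high) / 2 * sz;
		}
		printf("old ~%ld KiB per cpu, new ~%ld KiB per cpu\n",
		       old_total / 1024, new_total / 1024);
		return 0;
	}

This prints roughly 529 KiB vs 113 KiB per cpu; times an assumed 32 cpus the
old total is the ~20 Mbyte figure in the removed comment, and the new total
matches the ~116 Kbyte estimate added in the diff below.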

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220902211058.60789-9-alexei.starovoitov@gmail.com
parent 0fd7c5d4
kernel/bpf/memalloc.c +36 −14
@@ -100,6 +100,7 @@ struct bpf_mem_cache {
 	int unit_size;
 	/* count of objects in free_llist */
 	int free_cnt;
+	int low_watermark, high_watermark, batch;
 };
 
 struct bpf_mem_caches {
@@ -118,14 +119,6 @@ static struct llist_node notrace *__llist_del_first(struct llist_head *head)
 	return entry;
 }
 
-#define BATCH 48
-#define LOW_WATERMARK 32
-#define HIGH_WATERMARK 96
-/* Assuming the average number of elements per bucket is 64, when all buckets
- * are used the total memory will be: 64*16*32 + 64*32*32 + 64*64*32 + ... +
- * 64*4096*32 ~ 20Mbyte
- */
-
 static void *__alloc(struct bpf_mem_cache *c, int node)
 {
 	/* Allocate, but don't deplete atomic reserves that typical
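
For reference, the estimate in the comment removed above works out as
follows (the trailing *32 is apparently an assumed cpu count):

	64 * (16 + 32 + 64 + 96 + 128 + 196 + 256 + 512 + 1024 + 2048 + 4096) * 32
	  = 64 * 8468 * 32
	  = 17,342,464 bytes ≈ 17.3 MB, which the comment rounds to ~20 Mbyte.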
@@ -220,7 +213,7 @@ static void free_bulk(struct bpf_mem_cache *c)
 		if (IS_ENABLED(CONFIG_PREEMPT_RT))
 			local_irq_restore(flags);
 		free_one(c, llnode);
-	} while (cnt > (HIGH_WATERMARK + LOW_WATERMARK) / 2);
+	} while (cnt > (c->high_watermark + c->low_watermark) / 2);
 
 	/* and drain free_llist_extra */
 	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
@@ -234,12 +227,12 @@ static void bpf_mem_refill(struct irq_work *work)

 	/* Racy access to free_cnt. It doesn't need to be 100% accurate */
 	cnt = c->free_cnt;
-	if (cnt < LOW_WATERMARK)
+	if (cnt < c->low_watermark)
 		/* irq_work runs on this cpu and kmalloc will allocate
 		 * from the current numa node which is what we want here.
 		 */
-		alloc_bulk(c, BATCH, NUMA_NO_NODE);
-	else if (cnt > HIGH_WATERMARK)
+		alloc_bulk(c, c->batch, NUMA_NO_NODE);
+	else if (cnt > c->high_watermark)
 		free_bulk(c);
 }
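
Together with free_bulk() above, this gives the cache a simple hysteresis:
a refill triggered just below low_watermark adds batch elements (3/4 of the
gap between the marks, which for <=256 byte buckets is (96 - 32) / 4 * 3 == 48,
the same as the old BATCH), landing free_cnt between the marks instead of
overshooting high_watermark, while free_bulk() drains back down to the
midpoint. A hypothetical standalone simulation of that behavior:

	#include <stdio.h>

	int main(void)
	{
		int low = 32, high = 96;
		int batch = (high - low) / 4 * 3;	/* 48 */
		int cnt = 64;				/* free_cnt */

		for (int round = 0; round < 5; round++) {
			cnt -= 40;			/* burst of unit_alloc() */
			if (cnt < low)
				cnt += batch;		/* alloc_bulk(c, c->batch, ...) */
			cnt += 50;			/* burst of unit_free() */
			if (cnt > high)
				cnt = (high + low) / 2;	/* free_bulk() to midpoint */
			printf("round %d: cnt=%d\n", round, cnt);
		}
		return 0;
	}

free_cnt keeps returning to the 32..96 band rather than growing without
bound or draining to zero.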

@@ -248,9 +241,38 @@ static void notrace irq_work_raise(struct bpf_mem_cache *c)
 	irq_work_queue(&c->refill_work);
 }
 
+/* For typical bpf map case that uses bpf_mem_cache_alloc and single bucket
+ * the freelist cache will be elem_size * 64 (or less) on each cpu.
+ *
+ * For bpf programs that don't have statically known allocation sizes and
+ * assuming (low_mark + high_mark) / 2 as an average number of elements per
+ * bucket and all buckets are used the total amount of memory in freelists
+ * on each cpu will be:
+ * 64*16 + 64*32 + 64*64 + 64*96 + 64*128 + 64*196 + 64*256 + 32*512 + 16*1024 + 8*2048 + 4*4096
+ * == ~ 116 Kbyte using below heuristic.
+ * Initialized, but unused bpf allocator (not bpf map specific one) will
+ * consume ~ 11 Kbyte per cpu.
+ * Typical case will be between 11K and 116K closer to 11K.
+ * bpf progs can and should share bpf_mem_cache when possible.
+ */
+
 static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
 {
 	init_irq_work(&c->refill_work, bpf_mem_refill);
+	if (c->unit_size <= 256) {
+		c->low_watermark = 32;
+		c->high_watermark = 96;
+	} else {
+		/* When page_size == 4k, order-0 cache will have low_mark == 2
+		 * and high_mark == 6 with batch alloc of 3 individual pages at
+		 * a time.
+		 * 8k allocs and above low == 1, high == 3, batch == 1.
+		 */
+		c->low_watermark = max(32 * 256 / c->unit_size, 1);
+		c->high_watermark = max(96 * 256 / c->unit_size, 3);
+	}
+	c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);
+
 	/* To avoid consuming memory assume that 1st run of bpf
 	 * prog won't be doing more than 4 map_update_elem from
 	 * irq disabled region
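
A quick userspace check of the heuristic above (same expressions, run over
the allocator's size classes plus the 8k case mentioned in the comment):

	#include <stdio.h>

	#define max(a, b) ((a) > (b) ? (a) : (b))

	int main(void)
	{
		int sizes[] = {16, 32, 64, 96, 128, 196, 256, 512,
			       1024, 2048, 4096, 8192};

		for (int i = 0; i < 12; i++) {
			int unit_size = sizes[i], low, high, batch;

			if (unit_size <= 256) {
				low = 32;
				high = 96;
			} else {
				low = max(32 * 256 / unit_size, 1);
				high = max(96 * 256 / unit_size, 3);
			}
			batch = max((high - low) / 4 * 3, 1);
			printf("size %5d: low=%2d high=%2d batch=%2d\n",
			       unit_size, low, high, batch);
		}
		return 0;
	}

For 4096 byte elements this reproduces low == 2, high == 6, batch == 3, and
for 8192 bytes low == 1, high == 3, batch == 1, exactly as the comment says.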
@@ -392,7 +414,7 @@ static void notrace *unit_alloc(struct bpf_mem_cache *c)

 	WARN_ON(cnt < 0);
 
-	if (cnt < LOW_WATERMARK)
+	if (cnt < c->low_watermark)
 		irq_work_raise(c);
 	return llnode;
 }
@@ -425,7 +447,7 @@ static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
 	local_dec(&c->active);
 	local_irq_restore(flags);
 
-	if (cnt > HIGH_WATERMARK)
+	if (cnt > c->high_watermark)
 		/* free few objects from current cpu into global kmalloc pool */
 		irq_work_raise(c);
 }