Commit ef078600 authored by Song Liu, committed by Alexei Starovoitov

bpf: Select proper size for bpf_prog_pack



Using HPAGE_PMD_SIZE as the size for bpf_prog_pack is not ideal in some
cases. Specifically, on NUMA systems, __vmalloc_node_range only uses huge
pages when the allocation is at least PMD_SIZE * num_online_nodes() (for
example, 4MB instead of 2MB on a two-node x86 system). Also, if the system
does not support huge pages at all (e.g., when booted with the cmdline
option nohugevmalloc), it is better to fall back to PAGE_SIZE packs.

Add logic to select a proper size for bpf_prog_pack at runtime. This
solution is not ideal, as it makes assumptions about the behavior of
module_alloc and __vmalloc_node_range. However, it appears to be the
easiest solution, as it does not require changes to module_alloc or the
vmalloc code.
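
For reference, the size selection boils down to the probe below. This is a
restatement of the select_bpf_prog_pack_size() helper added in the patch,
with explanatory comments; it is not additional code beyond the patch.

static size_t select_bpf_prog_pack_size(void)
{
	/* Smallest size for which __vmalloc_node_range will use huge pages
	 * on a NUMA system (PMD_SIZE, i.e. 2MB on x86, times the number of
	 * online nodes).
	 */
	size_t size = PMD_SIZE * num_online_nodes();
	void *ptr = module_alloc(size);		/* trial allocation */

	/* If the trial area is not backed by huge pages (e.g. the kernel
	 * was booted with nohugevmalloc, or the allocation failed), fall
	 * back to single-page packs.
	 */
	if (!ptr || !is_vm_area_hugepages(ptr))
		size = PAGE_SIZE;

	vfree(ptr);				/* drop the trial allocation */
	return size;
}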

Fixes: 57631054 ("bpf: Introduce bpf_prog_pack allocator")
Signed-off-by: Song Liu <song@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20220311201135.3573610-1-song@kernel.org
parent 46e9244b
47 additions, 23 deletions
@@ -33,6 +33,7 @@
#include <linux/extable.h>
#include <linux/log2.h>
#include <linux/bpf_verifier.h>
+#include <linux/nodemask.h>

#include <asm/barrier.h>
#include <asm/unaligned.h>
@@ -815,15 +816,9 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
 * allocator. The prog_pack allocator uses HPAGE_PMD_SIZE page (2MB on x86)
 * to host BPF programs.
 */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define BPF_PROG_PACK_SIZE	HPAGE_PMD_SIZE
-#else
-#define BPF_PROG_PACK_SIZE	PAGE_SIZE
-#endif
#define BPF_PROG_CHUNK_SHIFT	6
#define BPF_PROG_CHUNK_SIZE	(1 << BPF_PROG_CHUNK_SHIFT)
#define BPF_PROG_CHUNK_MASK	(~(BPF_PROG_CHUNK_SIZE - 1))
-#define BPF_PROG_CHUNK_COUNT	(BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE)

struct bpf_prog_pack {
	struct list_head list;
@@ -831,30 +826,56 @@ struct bpf_prog_pack {
	unsigned long bitmap[];
};

-#define BPF_PROG_MAX_PACK_PROG_SIZE	BPF_PROG_PACK_SIZE
#define BPF_PROG_SIZE_TO_NBITS(size)	(round_up(size, BPF_PROG_CHUNK_SIZE) / BPF_PROG_CHUNK_SIZE)

+static size_t bpf_prog_pack_size = -1;
+
+static int bpf_prog_chunk_count(void)
+{
+	WARN_ON_ONCE(bpf_prog_pack_size == -1);
+	return bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE;
+}
+
static DEFINE_MUTEX(pack_mutex);
static LIST_HEAD(pack_list);

+static size_t select_bpf_prog_pack_size(void)
+{
+	size_t size;
+	void *ptr;
+
+	size = PMD_SIZE * num_online_nodes();
+	ptr = module_alloc(size);
+
+	/* Test whether we can get huge pages. If not just use PAGE_SIZE
+	 * packs.
+	 */
+	if (!ptr || !is_vm_area_hugepages(ptr))
+		size = PAGE_SIZE;
+
+	vfree(ptr);
+	return size;
+}
+
static struct bpf_prog_pack *alloc_new_pack(void)
{
	struct bpf_prog_pack *pack;

-	pack = kzalloc(sizeof(*pack) + BITS_TO_BYTES(BPF_PROG_CHUNK_COUNT), GFP_KERNEL);
+	pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(bpf_prog_chunk_count())),
+		       GFP_KERNEL);
	if (!pack)
		return NULL;
-	pack->ptr = module_alloc(BPF_PROG_PACK_SIZE);
+	pack->ptr = module_alloc(bpf_prog_pack_size);
	if (!pack->ptr) {
		kfree(pack);
		return NULL;
	}
-	bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE);
+	bitmap_zero(pack->bitmap, bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE);
	list_add_tail(&pack->list, &pack_list);

	set_vm_flush_reset_perms(pack->ptr);
-	set_memory_ro((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
-	set_memory_x((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE);
+	set_memory_ro((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
+	set_memory_x((unsigned long)pack->ptr, bpf_prog_pack_size / PAGE_SIZE);
	return pack;
}

@@ -865,7 +886,11 @@ static void *bpf_prog_pack_alloc(u32 size)
	unsigned long pos;
	void *ptr = NULL;

-	if (size > BPF_PROG_MAX_PACK_PROG_SIZE) {
+	mutex_lock(&pack_mutex);
+	if (bpf_prog_pack_size == -1)
+		bpf_prog_pack_size = select_bpf_prog_pack_size();
+
+	if (size > bpf_prog_pack_size) {
		size = round_up(size, PAGE_SIZE);
		ptr = module_alloc(size);
		if (ptr) {
@@ -873,13 +898,12 @@ static void *bpf_prog_pack_alloc(u32 size)
			set_memory_ro((unsigned long)ptr, size / PAGE_SIZE);
			set_memory_x((unsigned long)ptr, size / PAGE_SIZE);
		}
-		return ptr;
+		goto out;
	}
-	mutex_lock(&pack_mutex);
	list_for_each_entry(pack, &pack_list, list) {
-		pos = bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
+		pos = bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
						 nbits, 0);
-		if (pos < BPF_PROG_CHUNK_COUNT)
+		if (pos < bpf_prog_chunk_count())
			goto found_free_area;
	}

@@ -905,13 +929,13 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
	unsigned long pos;
	void *pack_ptr;

-	if (hdr->size > BPF_PROG_MAX_PACK_PROG_SIZE) {
+	mutex_lock(&pack_mutex);
+	if (hdr->size > bpf_prog_pack_size) {
		module_memfree(hdr);
-		return;
+		goto out;
	}

-	pack_ptr = (void *)((unsigned long)hdr & ~(BPF_PROG_PACK_SIZE - 1));
-	mutex_lock(&pack_mutex);
+	pack_ptr = (void *)((unsigned long)hdr & ~(bpf_prog_pack_size - 1));

	list_for_each_entry(tmp, &pack_list, list) {
		if (tmp->ptr == pack_ptr) {
@@ -927,8 +951,8 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
	pos = ((unsigned long)hdr - (unsigned long)pack_ptr) >> BPF_PROG_CHUNK_SHIFT;

	bitmap_clear(pack->bitmap, pos, nbits);
-	if (bitmap_find_next_zero_area(pack->bitmap, BPF_PROG_CHUNK_COUNT, 0,
-				       BPF_PROG_CHUNK_COUNT, 0) == 0) {
+	if (bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
+				       bpf_prog_chunk_count(), 0) == 0) {
		list_del(&pack->list);
		module_memfree(pack->ptr);
		kfree(pack);