Commit 92d3bff2 authored by Andrii Nakryiko

Merge branch 'bpf/selftests: page size fixes'



Yauheni Kaliuta says:

====================

A set of fixes to make the selftests work on systems with PAGE_SIZE > 4K,
plus a cleanup (version removal) and a ringbuf_multi extension.
---
v3->v4:
- zero initialize BPF programs' static variables;
- add bpf_map__inner_map to libbpf.map in alphabetical order;
- add bpf_map__set_inner_map_fd test to ringbuf_multi;

v2->v3:
 - reorder: move version removing patch first to keep main patches in
   one group;
 - rename "selftests/bpf: pass page size from userspace in sockopt_sk"
   as suggested;
 - convert sockopt_sk test to use ASSERT macros;
 - set page size from userspace;
 - split patches to pairs userspace/bpf. It's easier to check that
   every conversion works as expected;

v1->v2:

- add missing 'selftests/bpf: test_progs/sockopt_sk: Convert to use BPF skeleton'
====================

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents 957dca3d cfc0889c
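
The converted tests below all follow the same shape: open the skeleton
without loading, size any page-size-dependent maps using the runtime page
size, load, then pass the same value to the BPF program through a
zero-initialized .bss variable. A minimal sketch of that pattern (the
skeleton name test_example and the map name ringbuf are illustrative, not
taken from these diffs):

	#include <unistd.h>
	#include <bpf/libbpf.h>
	#include "test_example.skel.h"	/* hypothetical skeleton header */

	static int open_resize_load(void)
	{
		struct test_example *skel;
		int page_size = getpagesize();
		int err;

		skel = test_example__open();	/* open only, defer load */
		if (!skel)
			return -1;

		/* the kernel requires a ringbuf's size to be a multiple of
		 * the page size, so it cannot be hard-coded to 4096 */
		err = bpf_map__set_max_entries(skel->maps.ringbuf, page_size);
		if (err)
			goto out;

		err = test_example__load(skel);
		if (err)
			goto out;

		/* a zero-initialized static variable on the BPF side lands
		 * in .bss and is writable once the object is loaded */
		skel->bss->page_size = page_size;
	out:
		/* sketch only: tears the skeleton down unconditionally */
		test_example__destroy(skel);
		return err;
	}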
tools/lib/bpf/libbpf.c (+10 −0)
@@ -2194,6 +2194,7 @@ static int parse_btf_map_def(struct bpf_object *obj,
 			map->inner_map = calloc(1, sizeof(*map->inner_map));
 			if (!map->inner_map)
 				return -ENOMEM;
+			map->inner_map->fd = -1;
 			map->inner_map->sec_idx = obj->efile.btf_maps_shndx;
 			map->inner_map->name = malloc(strlen(map->name) +
 						      sizeof(".inner") + 1);
@@ -3845,6 +3846,14 @@ __u32 bpf_map__max_entries(const struct bpf_map *map)
 	return map->def.max_entries;
 }
 
+struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
+{
+	if (!bpf_map_type__is_map_in_map(map->def.type))
+		return NULL;
+
+	return map->inner_map;
+}
+
 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
 {
 	if (map->fd >= 0)
@@ -9476,6 +9485,7 @@ int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
 		pr_warn("error: inner_map_fd already specified\n");
 		return -EINVAL;
 	}
+	zfree(&map->inner_map);
 	map->inner_map_fd = fd;
 	return 0;
 }
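
For reference, a hedged usage sketch of the new getter: together with
bpf_map__set_max_entries() it lets a caller resize the inner map definition
of a map-in-map before load, which is what the ringbuf_multi extension
mentioned in the cover letter needs. The map name "outer_arr" is assumed
for illustration:

	#include <errno.h>
	#include <unistd.h>
	#include <bpf/libbpf.h>

	/* Sketch: before bpf_object__load(), look up a map-in-map by name
	 * (name assumed) and size its inner map definition to one page. */
	static int resize_inner_map(struct bpf_object *obj)
	{
		struct bpf_map *outer, *inner;

		outer = bpf_object__find_map_by_name(obj, "outer_arr");
		if (!outer)
			return -ENOENT;

		/* bpf_map__inner_map() returns NULL for non-map-in-map types */
		inner = bpf_map__inner_map(outer);
		if (!inner)
			return -EINVAL;

		return bpf_map__set_max_entries(inner, getpagesize());
	}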
tools/lib/bpf/libbpf.h (+1 −0)
@@ -480,6 +480,7 @@ LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
 LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
 
 LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);
+LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);
 
 LIBBPF_API long libbpf_get_error(const void *ptr);
 
tools/lib/bpf/libbpf.map (+1 −0)
@@ -359,5 +359,6 @@ LIBBPF_0.4.0 {
 		bpf_linker__finalize;
 		bpf_linker__free;
 		bpf_linker__new;
+		bpf_map__inner_map;
 		bpf_object__set_kversion;
 } LIBBPF_0.3.0;
tools/testing/selftests/bpf/prog_tests/map_ptr.c (+13 −2)
@@ -12,11 +12,22 @@ void test_map_ptr(void)
 	__u32 duration = 0, retval;
 	char buf[128];
 	int err;
+	int page_size = getpagesize();
 
-	skel = map_ptr_kern__open_and_load();
-	if (CHECK(!skel, "skel_open_load", "open_load failed\n"))
+	skel = map_ptr_kern__open();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
 		return;
 
+	err = bpf_map__set_max_entries(skel->maps.m_ringbuf, page_size);
+	if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
+		goto cleanup;
+
+	err = map_ptr_kern__load(skel);
+	if (!ASSERT_OK(err, "skel_load"))
+		goto cleanup;
+
+	skel->bss->page_size = page_size;
+
 	err = bpf_prog_test_run(bpf_program__fd(skel->progs.cg_skb), 1, &pkt_v4,
 				sizeof(pkt_v4), buf, NULL, &retval, NULL);
 
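On the BPF side (not shown in this excerpt), the counterpart of
skel->bss->page_size is a global in the test program; per the v3->v4 note
above it is zero-initialized, which places it in .bss rather than leaving
an uninitialized common symbol, and that is what makes it writable through
the skeleton after load. A sketch of that declaration (the name mirrors the
userspace access above):

	/* In the BPF object: explicit zero initialization keeps the variable
	 * in .bss, so userspace can set it after load and the program can
	 * compare map sizes against the runtime value. */
	static __u32 page_size = 0;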
tools/testing/selftests/bpf/prog_tests/mmap.c (+19 −5)
@@ -29,22 +29,36 @@ void test_mmap(void)
 	struct test_mmap *skel;
 	__u64 val = 0;
 
-	skel = test_mmap__open_and_load();
-	if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
+	skel = test_mmap__open();
+	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
 		return;
 
+	err = bpf_map__set_max_entries(skel->maps.rdonly_map, page_size);
+	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+		goto cleanup;
+
+	/* at least 4 pages of data */
+	err = bpf_map__set_max_entries(skel->maps.data_map,
+				       4 * (page_size / sizeof(u64)));
+	if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
+		goto cleanup;
+
+	err = test_mmap__load(skel);
+	if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
+		goto cleanup;
+
 	bss_map = skel->maps.bss;
 	data_map = skel->maps.data_map;
 	data_map_fd = bpf_map__fd(data_map);
 
 	rdmap_fd = bpf_map__fd(skel->maps.rdonly_map);
-	tmp1 = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
+	tmp1 = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0);
 	if (CHECK(tmp1 != MAP_FAILED, "rdonly_write_mmap", "unexpected success\n")) {
-		munmap(tmp1, 4096);
+		munmap(tmp1, page_size);
 		goto cleanup;
 	}
 	/* now double-check if it's mmap()'able at all */
-	tmp1 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, rdmap_fd, 0);
+	tmp1 = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rdmap_fd, 0);
 	if (CHECK(tmp1 == MAP_FAILED, "rdonly_read_mmap", "failed: %d\n", errno))
 		goto cleanup;

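A quick check of the arithmetic in the data_map sizing above: max_entries
counts __u64 slots, so the map's value area works out to exactly four pages,
matching the "at least 4 pages of data" comment. An illustrative sketch
(function and variable names are not from the test):

	#include <stddef.h>
	#include <stdint.h>

	/* entries * sizeof(__u64) == 4 * page_size, since page_size is
	 * always a multiple of sizeof(__u64) */
	static size_t data_map_bytes(size_t page_size)
	{
		size_t entries = 4 * (page_size / sizeof(uint64_t));
		return entries * sizeof(uint64_t);	/* == 4 * page_size */
	}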