Commit b2531d4b authored by Andrii Nakryiko, committed by Daniel Borkmann
Browse files

selftests/bpf: Convert some selftests to high-level BPF map APIs



Convert a bunch of selftests to using newly added high-level BPF map
APIs.

This change exposed that the map_kptr selftests allocated a buffer that was
too big, which is fixed in this patch as well.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20220512220713.2617964-2-andrii@kernel.org
parent 737d0646
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -167,7 +167,7 @@ void test_core_autosize(void)
	if (!ASSERT_OK_PTR(bss_map, "bss_map_find"))
		goto cleanup;

	err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, (void *)&out);
	err = bpf_map__lookup_elem(bss_map, &zero, sizeof(zero), &out, sizeof(out), 0);
	if (!ASSERT_OK(err, "bss_lookup"))
		goto cleanup;

+9 −8
Original line number Diff line number Diff line
@@ -6,31 +6,32 @@

void test_core_retro(void)
{
	int err, zero = 0, res, duration = 0, my_pid = getpid();
	int err, zero = 0, res, my_pid = getpid();
	struct test_core_retro *skel;

	/* load program */
	skel = test_core_retro__open_and_load();
	if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto out_close;

	err = bpf_map_update_elem(bpf_map__fd(skel->maps.exp_tgid_map), &zero, &my_pid, 0);
	if (CHECK(err, "map_update", "failed to set expected PID: %d\n", errno))
	err = bpf_map__update_elem(skel->maps.exp_tgid_map, &zero, sizeof(zero),
				   &my_pid, sizeof(my_pid), 0);
	if (!ASSERT_OK(err, "map_update"))
		goto out_close;

	/* attach probe */
	err = test_core_retro__attach(skel);
	if (CHECK(err, "attach_kprobe", "err %d\n", err))
	if (!ASSERT_OK(err, "attach_kprobe"))
		goto out_close;

	/* trigger */
	usleep(1);

	err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &zero, &res);
	if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno))
	err = bpf_map__lookup_elem(skel->maps.results, &zero, sizeof(zero), &res, sizeof(res), 0);
	if (!ASSERT_OK(err, "map_lookup"))
		goto out_close;

	CHECK(res != my_pid, "pid_check", "got %d != exp %d\n", res, my_pid);
	ASSERT_EQ(res, my_pid, "pid_check");

out_close:
	test_core_retro__destroy(skel);
+17 −13
Original line number Diff line number Diff line
@@ -10,9 +10,10 @@ static unsigned int duration;

static void test_hash_map(void)
{
	int i, err, hashmap_fd, max_entries, percpu_map_fd;
	int i, err, max_entries;
	struct for_each_hash_map_elem *skel;
	__u64 *percpu_valbuf = NULL;
	size_t percpu_val_sz;
	__u32 key, num_cpus;
	__u64 val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
@@ -25,26 +26,27 @@ static void test_hash_map(void)
	if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load"))
		return;

	hashmap_fd = bpf_map__fd(skel->maps.hashmap);
	max_entries = bpf_map__max_entries(skel->maps.hashmap);
	for (i = 0; i < max_entries; i++) {
		key = i;
		val = i + 1;
		err = bpf_map_update_elem(hashmap_fd, &key, &val, BPF_ANY);
		err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	num_cpus = bpf_num_possible_cpus();
	percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
	percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
	percpu_val_sz = sizeof(__u64) * num_cpus;
	percpu_valbuf = malloc(percpu_val_sz);
	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
		goto out;

	key = 1;
	for (i = 0; i < num_cpus; i++)
		percpu_valbuf[i] = i + 1;
	err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
	err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
				   percpu_valbuf, percpu_val_sz, BPF_ANY);
	if (!ASSERT_OK(err, "percpu_map_update"))
		goto out;

@@ -58,7 +60,7 @@ static void test_hash_map(void)
	ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems");

	key = 1;
	err = bpf_map_lookup_elem(hashmap_fd, &key, &val);
	err = bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key), &val, sizeof(val), 0);
	ASSERT_ERR(err, "hashmap_lookup");

	ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
@@ -75,9 +77,10 @@ static void test_hash_map(void)
static void test_array_map(void)
{
	__u32 key, num_cpus, max_entries;
	int i, arraymap_fd, percpu_map_fd, err;
	int i, err;
	struct for_each_array_map_elem *skel;
	__u64 *percpu_valbuf = NULL;
	size_t percpu_val_sz;
	__u64 val, expected_total;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
@@ -89,7 +92,6 @@ static void test_array_map(void)
	if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load"))
		return;

	arraymap_fd = bpf_map__fd(skel->maps.arraymap);
	expected_total = 0;
	max_entries = bpf_map__max_entries(skel->maps.arraymap);
	for (i = 0; i < max_entries; i++) {
@@ -98,21 +100,23 @@ static void test_array_map(void)
		/* skip the last iteration for expected total */
		if (i != max_entries - 1)
			expected_total += val;
		err = bpf_map_update_elem(arraymap_fd, &key, &val, BPF_ANY);
		err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	num_cpus = bpf_num_possible_cpus();
	percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
	percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
	percpu_val_sz = sizeof(__u64) * num_cpus;
	percpu_valbuf = malloc(percpu_val_sz);
	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
		goto out;

	key = 0;
	for (i = 0; i < num_cpus; i++)
		percpu_valbuf[i] = i + 1;
	err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
	err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
				   percpu_valbuf, percpu_val_sz, BPF_ANY);
	if (!ASSERT_OK(err, "percpu_map_update"))
		goto out;

+9 −6
Original line number Diff line number Diff line
@@ -112,7 +112,8 @@ static void test_lookup_and_delete_hash(void)

	/* Lookup and delete element. */
	key = 1;
	err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value);
	err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
					      &key, sizeof(key), &value, sizeof(value), 0);
	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
		goto cleanup;

@@ -147,7 +148,8 @@ static void test_lookup_and_delete_percpu_hash(void)

	/* Lookup and delete element. */
	key = 1;
	err = bpf_map_lookup_and_delete_elem(map_fd, &key, value);
	err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
					      &key, sizeof(key), value, sizeof(value), 0);
	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
		goto cleanup;

@@ -191,7 +193,8 @@ static void test_lookup_and_delete_lru_hash(void)
		goto cleanup;

	/* Lookup and delete element 3. */
	err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value);
	err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
					      &key, sizeof(key), &value, sizeof(value), 0);
	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
		goto cleanup;

@@ -240,10 +243,10 @@ static void test_lookup_and_delete_lru_percpu_hash(void)
		value[i] = 0;

	/* Lookup and delete element 3. */
	err = bpf_map_lookup_and_delete_elem(map_fd, &key, value);
	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem")) {
	err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
					      &key, sizeof(key), value, sizeof(value), 0);
	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
		goto cleanup;
	}

	/* Check if only one CPU has set the value. */
	for (i = 0; i < nr_cpus; i++) {
+14 −9
Original line number Diff line number Diff line
@@ -91,7 +91,7 @@ static void test_map_kptr_success(bool test_run)
	);
	struct map_kptr *skel;
	int key = 0, ret;
	char buf[24];
	char buf[16];

	skel = map_kptr__open_and_load();
	if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
@@ -107,24 +107,29 @@ static void test_map_kptr_success(bool test_run)
	if (test_run)
		return;

	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.array_map), &key, buf, 0);
	ret = bpf_map__update_elem(skel->maps.array_map,
				   &key, sizeof(key), buf, sizeof(buf), 0);
	ASSERT_OK(ret, "array_map update");
	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.array_map), &key, buf, 0);
	ret = bpf_map__update_elem(skel->maps.array_map,
				   &key, sizeof(key), buf, sizeof(buf), 0);
	ASSERT_OK(ret, "array_map update2");

	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.hash_map), &key, buf, 0);
	ret = bpf_map__update_elem(skel->maps.hash_map,
				   &key, sizeof(key), buf, sizeof(buf), 0);
	ASSERT_OK(ret, "hash_map update");
	ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.hash_map), &key);
	ret = bpf_map__delete_elem(skel->maps.hash_map, &key, sizeof(key), 0);
	ASSERT_OK(ret, "hash_map delete");

	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.hash_malloc_map), &key, buf, 0);
	ret = bpf_map__update_elem(skel->maps.hash_malloc_map,
				   &key, sizeof(key), buf, sizeof(buf), 0);
	ASSERT_OK(ret, "hash_malloc_map update");
	ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.hash_malloc_map), &key);
	ret = bpf_map__delete_elem(skel->maps.hash_malloc_map, &key, sizeof(key), 0);
	ASSERT_OK(ret, "hash_malloc_map delete");

	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.lru_hash_map), &key, buf, 0);
	ret = bpf_map__update_elem(skel->maps.lru_hash_map,
				   &key, sizeof(key), buf, sizeof(buf), 0);
	ASSERT_OK(ret, "lru_hash_map update");
	ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.lru_hash_map), &key);
	ret = bpf_map__delete_elem(skel->maps.lru_hash_map, &key, sizeof(key), 0);
	ASSERT_OK(ret, "lru_hash_map delete");

	map_kptr__destroy(skel);
Loading