Commit 7b6abcfa authored by David Vernet, committed by Alexei Starovoitov
Browse files

selftests/bpf: Add selftest suite for cpumask kfuncs



A recent patch added a new set of kfuncs for allocating, freeing,
manipulating, and querying cpumasks. This patch adds a new 'cpumask'
selftest suite which verifies their behavior.

Signed-off-by: David Vernet <void@manifault.com>
Link: https://lore.kernel.org/r/20230125143816.721952-5-void@manifault.com


Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent a6541f4d
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -13,6 +13,7 @@ cgroup_hierarchical_stats # JIT does not support calling kernel f
cgrp_kfunc                               # JIT does not support calling kernel function
cgrp_local_storage                       # prog_attach unexpected error: -524                                          (trampoline)
core_read_macros                         # unknown func bpf_probe_read#4                                               (overlapping)
cpumask                                  # JIT does not support calling kernel function
d_path                                   # failed to auto-attach program 'prog_stat': -524                             (trampoline)
decap_sanity                             # JIT does not support calling kernel function                                (kfunc)
deny_namespace                           # failed to attach: ERROR: strerror_r(-524)=22                                (trampoline)
+74 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <test_progs.h>
#include "cpumask_failure.skel.h"
#include "cpumask_success.skel.h"

/* Names of the BPF programs in cpumask_success.c; test_cpumask() below runs
 * each one as its own subtest.
 */
static const char * const cpumask_success_testcases[] = {
	"test_alloc_free_cpumask",
	"test_set_clear_cpu",
	"test_setall_clear_cpu",
	"test_first_firstzero_cpu",
	"test_test_and_set_clear",
	"test_and_or_xor",
	"test_intersects_subset",
	"test_copy_any_anyand",
	"test_insert_leave",
	"test_insert_remove_release",
	"test_insert_kptr_get_release",
};

/* Open and load the cpumask_success skeleton, attach the program named
 * @prog_name (a tp_btf/task_newtask prog), fork a child to fire the
 * tracepoint, and assert that the BPF-side err global stayed 0.
 */
static void verify_success(const char *prog_name)
{
	struct cpumask_success *skel;
	struct bpf_program *prog;
	struct bpf_link *link = NULL;
	pid_t child_pid;
	int status;

	skel = cpumask_success__open();
	if (!ASSERT_OK_PTR(skel, "cpumask_success__open"))
		return;

	/* Written before load so the progs can filter on the test's PID. */
	skel->bss->pid = getpid();
	skel->bss->nr_cpus = libbpf_num_possible_cpus();

	/* Fix: check the return code of load. The previous code re-checked
	 * the skel pointer (always non-NULL here), so load failures were
	 * silently ignored.
	 */
	if (!ASSERT_OK(cpumask_success__load(skel), "cpumask_success__load"))
		goto cleanup;

	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
		goto cleanup;

	link = bpf_program__attach(prog);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach"))
		goto cleanup;

	/* fork() triggers the task_newtask tracepoint in the parent. */
	child_pid = fork();
	if (!ASSERT_GT(child_pid, -1, "child_pid"))
		goto cleanup;
	if (child_pid == 0)
		_exit(0);
	/* Fix: don't ignore waitpid() failure — reap the child and check. */
	ASSERT_EQ(waitpid(child_pid, &status, 0), child_pid, "waitpid");
	ASSERT_OK(skel->bss->err, "post_wait_err");

cleanup:
	bpf_link__destroy(link);
	cpumask_success__destroy(skel);
}

void test_cpumask(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cpumask_success_testcases); i++) {
		if (!test__start_subtest(cpumask_success_testcases[i]))
			continue;

		verify_success(cpumask_success_testcases[i]);
	}

	RUN_TESTS(cpumask_failure);
}
+114 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#ifndef _CPUMASK_COMMON_H
#define _CPUMASK_COMMON_H

#include "errno.h"
#include <stdbool.h>

/* Set nonzero by the BPF programs on failure; read back by the userspace
 * runner after the tracepoint fires.
 */
int err;

/* Map value wrapping a referenced kptr to a bpf_cpumask, used to exercise
 * storing cpumasks in maps (bpf_kptr_xchg / bpf_cpumask_kptr_get).
 */
struct __cpumask_map_value {
	struct bpf_cpumask __kptr_ref * cpumask;
};

/* Single-slot array map holding one __cpumask_map_value (key 0). */
struct array_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, struct __cpumask_map_value);
	__uint(max_entries, 1);
} __cpumask_map SEC(".maps");

/* Prototypes for the cpumask kfuncs under test; resolved against kernel BTF
 * at load time via __ksym. Functions taking struct bpf_cpumask * mutate the
 * mask; those taking const struct cpumask * are read-only queries.
 */
struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumask) __ksym;
u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym;
u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_and(struct bpf_cpumask *cpumask,
		     const struct cpumask *src1,
		     const struct cpumask *src2) __ksym;
void bpf_cpumask_or(struct bpf_cpumask *cpumask,
		    const struct cpumask *src1,
		    const struct cpumask *src2) __ksym;
void bpf_cpumask_xor(struct bpf_cpumask *cpumask,
		     const struct cpumask *src1,
		     const struct cpumask *src2) __ksym;
bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym;
bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym;
bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym;
void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym;
u32 bpf_cpumask_any(const struct cpumask *src) __ksym;
u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2) __ksym;

/* Reinterpret a struct bpf_cpumask * as the const struct cpumask * expected
 * by the read-only query kfuncs above.
 */
static inline const struct cpumask *cast(struct bpf_cpumask *cpumask)
{
	return (const struct cpumask *)cpumask;
}

/* Allocate a cpumask and sanity-check that it starts out empty.
 *
 * On failure, sets the global err (1 = allocation failed, 2 = fresh mask
 * unexpectedly non-empty) and returns NULL. On success, the caller owns the
 * returned reference and must release it or store it in a map.
 */
static inline struct bpf_cpumask *create_cpumask(void)
{
	struct bpf_cpumask *cpumask;

	cpumask = bpf_cpumask_create();
	if (!cpumask) {
		err = 1;
		return NULL;
	}

	if (!bpf_cpumask_empty(cast(cpumask))) {
		err = 2;
		bpf_cpumask_release(cpumask);
		return NULL;
	}

	return cpumask;
}

/* Look up the single entry (key 0) of __cpumask_map; NULL if absent. */
static inline struct __cpumask_map_value *cpumask_map_value_lookup(void)
{
	u32 zero = 0;

	return bpf_map_lookup_elem(&__cpumask_map, &zero);
}

/* Store @mask in the single slot of __cpumask_map.
 *
 * Ownership: on any failure the reference to @mask is released here, so the
 * caller must not touch @mask after a nonzero return. On success (0), the
 * map owns the reference.
 */
static inline int cpumask_map_insert(struct bpf_cpumask *mask)
{
	struct __cpumask_map_value local, *v;
	long status;
	struct bpf_cpumask *old;
	u32 key = 0;

	/* Seed the slot with a NULL kptr before exchanging @mask in. */
	local.cpumask = NULL;
	status = bpf_map_update_elem(&__cpumask_map, &key, &local, 0);
	if (status) {
		bpf_cpumask_release(mask);
		return status;
	}

	v = bpf_map_lookup_elem(&__cpumask_map, &key);
	if (!v) {
		bpf_cpumask_release(mask);
		return -ENOENT;
	}

	/* Swap @mask into the kptr field; a non-NULL previous value means the
	 * slot was already populated — drop it and report -EEXIST.
	 */
	old = bpf_kptr_xchg(&v->cpumask, mask);
	if (old) {
		bpf_cpumask_release(old);
		return -EEXIST;
	}

	return 0;
}

#endif /* _CPUMASK_COMMON_H */
+126 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#include "cpumask_common.h"

char _license[] SEC("license") = "GPL";

/* Prototype for all of the program trace events below:
 *
 * TRACE_EVENT(task_newtask,
 *         TP_PROTO(struct task_struct *p, u64 clone_flags)
 */

/* Negative test: the reference returned by create_cpumask() is leaked, so
 * the verifier must reject the prog with "Unreleased reference".
 */
SEC("tp_btf/task_newtask")
__failure __msg("Unreleased reference")
int BPF_PROG(test_alloc_no_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	cpumask = create_cpumask();

	/* cpumask is never released. */
	return 0;
}

/* Negative test: after the first release the pointer is no longer a valid
 * reference (and may be NULL), so the second release must be rejected.
 */
SEC("tp_btf/task_newtask")
__failure __msg("NULL pointer passed to trusted arg0")
int BPF_PROG(test_alloc_double_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	cpumask = create_cpumask();

	/* cpumask is released twice. */
	bpf_cpumask_release(cpumask);
	bpf_cpumask_release(cpumask);

	return 0;
}

/* Negative test: bpf_cpumask_acquire() only accepts a struct bpf_cpumask;
 * casting the task's plain struct cpumask must be rejected by BTF checks.
 */
SEC("tp_btf/task_newtask")
__failure __msg("bpf_cpumask_acquire args#0 expected pointer to STRUCT bpf_cpumask")
int BPF_PROG(test_acquire_wrong_cpumask, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	/* Can't acquire a non-struct bpf_cpumask. */
	cpumask = bpf_cpumask_acquire((struct bpf_cpumask *)task->cpus_ptr);

	return 0;
}

/* Negative test: mutating kfuncs require a struct bpf_cpumask; a cast plain
 * cpumask (the task's cpus_ptr) must be rejected by BTF checks.
 */
SEC("tp_btf/task_newtask")
__failure __msg("bpf_cpumask_set_cpu args#1 expected pointer to STRUCT bpf_cpumask")
int BPF_PROG(test_mutate_cpumask, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	/* Can't set the CPU of a non-struct bpf_cpumask. */
	bpf_cpumask_set_cpu(0, (struct bpf_cpumask *)task->cpus_ptr);

	return 0;
}

/* Negative test: bpf_kptr_xchg() out of the map hands back an owned
 * reference; leaking it must fail verification with "Unreleased reference".
 */
SEC("tp_btf/task_newtask")
__failure __msg("Unreleased reference")
int BPF_PROG(test_insert_remove_no_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;
	struct __cpumask_map_value *v;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (cpumask_map_insert(cpumask))
		return 0;

	v = cpumask_map_value_lookup();
	if (!v)
		return 0;

	cpumask = bpf_kptr_xchg(&v->cpumask, NULL);

	/* cpumask is never released. */
	return 0;
}

/* Negative test: bpf_cpumask_kptr_get() returns an acquired reference;
 * leaking it must fail verification with "Unreleased reference".
 */
SEC("tp_btf/task_newtask")
__failure __msg("Unreleased reference")
int BPF_PROG(test_kptr_get_no_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;
	struct __cpumask_map_value *v;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (cpumask_map_insert(cpumask))
		return 0;

	v = cpumask_map_value_lookup();
	if (!v)
		return 0;

	cpumask = bpf_cpumask_kptr_get(&v->cpumask);

	/* cpumask is never released. */
	return 0;
}

/* Negative test: KF_TRUSTED_ARGS kfuncs must reject a NULL argument. */
SEC("tp_btf/task_newtask")
__failure __msg("NULL pointer passed to trusted arg0")
int BPF_PROG(test_cpumask_null, struct task_struct *task, u64 clone_flags)
{
	/* NULL passed to KF_TRUSTED_ARGS kfunc. */
	bpf_cpumask_empty(NULL);

	return 0;
}
+426 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "cpumask_common.h"

char _license[] SEC("license") = "GPL";

int pid, nr_cpus;

/* True iff the task firing the tracepoint is the test runner process whose
 * PID was written into the global 'pid' before load.
 */
static bool is_test_task(void)
{
	return pid == (int)(bpf_get_current_pid_tgid() >> 32);
}

/* Allocate four cpumasks, handing ownership of one reference each to the
 * caller via @out1..@out4. Returns true on success. On failure, returns
 * false with every already-created mask released and the global err set
 * (3/4/5 identify which allocation failed; create_cpumask() itself sets
 * err for the first mask).
 */
static bool create_cpumask_set(struct bpf_cpumask **out1,
			       struct bpf_cpumask **out2,
			       struct bpf_cpumask **out3,
			       struct bpf_cpumask **out4)
{
	struct bpf_cpumask *m1, *m2, *m3, *m4;

	m1 = create_cpumask();
	if (!m1)
		return false;

	m2 = create_cpumask();
	if (!m2) {
		err = 3;
		goto release1;
	}

	m3 = create_cpumask();
	if (!m3) {
		err = 4;
		goto release2;
	}

	m4 = create_cpumask();
	if (!m4) {
		err = 5;
		goto release3;
	}

	*out1 = m1;
	*out2 = m2;
	*out3 = m3;
	*out4 = m4;
	return true;

	/* Unwind in reverse order of acquisition. */
release3:
	bpf_cpumask_release(m3);
release2:
	bpf_cpumask_release(m2);
release1:
	bpf_cpumask_release(m1);
	return false;
}

/* Smoke test: allocate a cpumask and release it — the acquired reference is
 * dropped exactly once.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_alloc_free_cpumask, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask;

	if (!is_test_task())
		return 0;

	mask = create_cpumask();
	if (mask)
		bpf_cpumask_release(mask);

	return 0;
}

/* Verify bpf_cpumask_set_cpu()/clear_cpu() round-trip on CPU 0, observed
 * through bpf_cpumask_test_cpu(). err 3/4 mark which check failed.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_set_clear_cpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	bpf_cpumask_set_cpu(0, cpumask);
	if (!bpf_cpumask_test_cpu(0, cast(cpumask))) {
		err = 3;
		goto release_exit;
	}

	bpf_cpumask_clear_cpu(0, cpumask);
	if (bpf_cpumask_test_cpu(0, cast(cpumask))) {
		err = 4;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

/* Verify bpf_cpumask_setall() yields a full mask and bpf_cpumask_clear()
 * yields an empty one. err 3/4 mark which check failed.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_setall_clear_cpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	bpf_cpumask_setall(cpumask);
	if (!bpf_cpumask_full(cast(cpumask))) {
		err = 3;
		goto release_exit;
	}

	bpf_cpumask_clear(cpumask);
	if (!bpf_cpumask_empty(cast(cpumask))) {
		err = 4;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

/* Verify bpf_cpumask_first()/first_zero() on an empty mask and again after
 * setting CPU 0. err 3-6 mark which check failed.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_first_firstzero_cpu, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	/* Empty mask: first set bit must be past the end (>= nr_cpus). */
	if (bpf_cpumask_first(cast(cpumask)) < nr_cpus) {
		err = 3;
		goto release_exit;
	}

	/* Empty mask: first zero bit is CPU 0. (Stray debug bpf_printk()
	 * removed here — it spammed the trace pipe on every failure.)
	 */
	if (bpf_cpumask_first_zero(cast(cpumask)) != 0) {
		err = 4;
		goto release_exit;
	}

	bpf_cpumask_set_cpu(0, cpumask);
	/* With only CPU 0 set: first set bit is 0, first zero bit is 1. */
	if (bpf_cpumask_first(cast(cpumask)) != 0) {
		err = 5;
		goto release_exit;
	}

	if (bpf_cpumask_first_zero(cast(cpumask)) != 1) {
		err = 6;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

/* Verify the atomic test-and-set / test-and-clear kfuncs: first set returns
 * false (bit was clear), second returns true, and test-and-clear on a set
 * bit returns true. err 3-5 mark which check failed.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	if (!is_test_task())
		return 0;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (bpf_cpumask_test_and_set_cpu(0, cpumask)) {
		err = 3;
		goto release_exit;
	}

	if (!bpf_cpumask_test_and_set_cpu(0, cpumask)) {
		err = 4;
		goto release_exit;
	}

	if (!bpf_cpumask_test_and_clear_cpu(0, cpumask)) {
		err = 5;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(cpumask);
	return 0;
}

/* Verify AND/OR/XOR over two disjoint single-bit masks (CPU 0 and CPU 1):
 * AND is empty, OR contains both bits, and XOR of disjoint masks equals
 * their OR. err 6-10 mark which check failed.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_and_or_xor, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;

	if (!is_test_task())
		return 0;

	if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
		return 0;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);

	/* bpf_cpumask_and() returns whether the result is non-empty. */
	if (bpf_cpumask_and(dst1, cast(mask1), cast(mask2))) {
		err = 6;
		goto release_exit;
	}
	if (!bpf_cpumask_empty(cast(dst1))) {
		err = 7;
		goto release_exit;
	}

	bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
	if (!bpf_cpumask_test_cpu(0, cast(dst1))) {
		err = 8;
		goto release_exit;
	}
	if (!bpf_cpumask_test_cpu(1, cast(dst1))) {
		err = 9;
		goto release_exit;
	}

	bpf_cpumask_xor(dst2, cast(mask1), cast(mask2));
	if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
		err = 10;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(mask1);
	bpf_cpumask_release(mask2);
	bpf_cpumask_release(dst1);
	bpf_cpumask_release(dst2);
	return 0;
}

/* Verify intersects/subset: disjoint single-bit masks don't intersect; each
 * is a subset of their OR; the OR is not a subset of either. err 6-9 mark
 * which check failed.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_intersects_subset, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;

	if (!is_test_task())
		return 0;

	if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
		return 0;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);
	if (bpf_cpumask_intersects(cast(mask1), cast(mask2))) {
		err = 6;
		goto release_exit;
	}

	bpf_cpumask_or(dst1, cast(mask1), cast(mask2));
	if (!bpf_cpumask_subset(cast(mask1), cast(dst1))) {
		err = 7;
		goto release_exit;
	}

	if (!bpf_cpumask_subset(cast(mask2), cast(dst1))) {
		err = 8;
		goto release_exit;
	}

	if (bpf_cpumask_subset(cast(dst1), cast(mask1))) {
		err = 9;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(mask1);
	bpf_cpumask_release(mask2);
	bpf_cpumask_release(dst1);
	bpf_cpumask_release(dst2);
	return 0;
}

/* Verify copy/any/any_and: any() on a single-bit mask returns that bit,
 * any() on an empty mask returns >= nr_cpus, copy() makes masks equal, and
 * any_and() of disjoint masks returns >= nr_cpus. err 6-10 mark which
 * check failed.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask1, *mask2, *dst1, *dst2;
	u32 cpu;

	if (!is_test_task())
		return 0;

	if (!create_cpumask_set(&mask1, &mask2, &dst1, &dst2))
		return 0;

	bpf_cpumask_set_cpu(0, mask1);
	bpf_cpumask_set_cpu(1, mask2);
	bpf_cpumask_or(dst1, cast(mask1), cast(mask2));

	cpu = bpf_cpumask_any(cast(mask1));
	if (cpu != 0) {
		err = 6;
		goto release_exit;
	}

	/* dst2 is still empty here, so any() must find no CPU. */
	cpu = bpf_cpumask_any(cast(dst2));
	if (cpu < nr_cpus) {
		err = 7;
		goto release_exit;
	}

	bpf_cpumask_copy(dst2, cast(dst1));
	if (!bpf_cpumask_equal(cast(dst1), cast(dst2))) {
		err = 8;
		goto release_exit;
	}

	/* After the copy, dst2 holds only CPUs 0 and 1. */
	cpu = bpf_cpumask_any(cast(dst2));
	if (cpu > 1) {
		err = 9;
		goto release_exit;
	}

	cpu = bpf_cpumask_any_and(cast(mask1), cast(mask2));
	if (cpu < nr_cpus) {
		err = 10;
		goto release_exit;
	}

release_exit:
	bpf_cpumask_release(mask1);
	bpf_cpumask_release(mask2);
	bpf_cpumask_release(dst1);
	bpf_cpumask_release(dst2);
	return 0;
}

/* Verify that a cpumask reference may legally be left in a map: once the
 * map owns the kptr, the program can exit without releasing it.
 * cpumask_map_insert() releases the mask itself on failure.
 *
 * Fix: dropped the unused 'struct __cpumask_map_value *v' local, which
 * triggered a -Wunused-variable warning.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_insert_leave, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (cpumask_map_insert(cpumask))
		err = 3;

	return 0;
}

/* Verify the insert → bpf_kptr_xchg()-out → release flow: the mask stored
 * in the map can be exchanged back out and must then be released. err 3-5
 * mark which step failed.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_insert_remove_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;
	struct __cpumask_map_value *v;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (cpumask_map_insert(cpumask)) {
		err = 3;
		return 0;
	}

	v = cpumask_map_value_lookup();
	if (!v) {
		err = 4;
		return 0;
	}

	cpumask = bpf_kptr_xchg(&v->cpumask, NULL);
	if (cpumask)
		bpf_cpumask_release(cpumask);
	else
		err = 5;

	return 0;
}

/* Verify the insert → bpf_cpumask_kptr_get() → release flow: kptr_get takes
 * a new reference on the mask stored in the map (the map keeps its own),
 * and that reference must be released. err 3-5 mark which step failed.
 */
SEC("tp_btf/task_newtask")
int BPF_PROG(test_insert_kptr_get_release, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *cpumask;
	struct __cpumask_map_value *v;

	cpumask = create_cpumask();
	if (!cpumask)
		return 0;

	if (cpumask_map_insert(cpumask)) {
		err = 3;
		return 0;
	}

	v = cpumask_map_value_lookup();
	if (!v) {
		err = 4;
		return 0;
	}

	cpumask = bpf_cpumask_kptr_get(&v->cpumask);
	if (cpumask)
		bpf_cpumask_release(cpumask);
	else
		err = 5;

	return 0;
}