Commit 48671232 authored by Yonghong Song, committed by Alexei Starovoitov
Browse files

selftests/bpf: Add tests for bpf_rcu_read_lock()



Add a few positive/negative tests to test bpf_rcu_read_lock()
and its corresponding verifier support. The new test will fail
on s390x and aarch64, so an entry is added to each of their
respective deny lists.

Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
Signed-off-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/r/20221124053222.2374650-1-yhs@fb.com


Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 9bb00b28
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -45,6 +45,7 @@ modify_return # modify_return__attach failed
module_attach                                    # skel_attach skeleton attach failed: -524
mptcp/base                                       # run_test mptcp unexpected error: -524 (errno 524)
netcnt                                           # packets unexpected packets: actual 10001 != expected 10000
rcu_read_lock                                    # failed to attach: ERROR: strerror_r(-524)=22
recursion                                        # skel_attach unexpected error: -524 (errno 524)
ringbuf                                          # skel_attach skeleton attachment failed: -1
setget_sockopt                                   # attach_cgroup unexpected error: -524
+1 −0
Original line number Diff line number Diff line
@@ -43,6 +43,7 @@ module_attach # skel_attach skeleton attach failed: -
mptcp
netcnt                                   # failed to load BPF skeleton 'netcnt_prog': -7                               (?)
probe_user                               # check_kprobe_res wrong kprobe res from probe read                           (?)
rcu_read_lock                            # failed to find kernel BTF type ID of '__x64_sys_getpgid': -3                (?)
recursion                                # skel_attach unexpected error: -524                                          (trampoline)
ringbuf                                  # skel_load skeleton load failed                                              (?)
select_reuseport                         # intermittently fails on new s390x setup
+158 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates.*/

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <test_progs.h>
#include <bpf/btf.h>
#include "rcu_read_lock.skel.h"
#include "cgroup_helpers.h"

/* id of the /rcu_read_lock cgroup; set in test_rcu_read_lock() and
 * compared against the value the get_cgroup_id BPF prog records
 */
static unsigned long long cgroup_id;

/* Load, attach and trigger all positive test programs, then verify the
 * values they wrote back into the skeleton's bss.
 */
static void test_success(void)
{
	struct rcu_read_lock *skel;
	int ret;

	skel = rcu_read_lock__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* progs filter on this pid so other threads don't interfere */
	skel->bss->target_pid = syscall(SYS_gettid);

	/* enable every program expected to pass the verifier */
	bpf_program__set_autoload(skel->progs.get_cgroup_id, true);
	bpf_program__set_autoload(skel->progs.task_succ, true);
	bpf_program__set_autoload(skel->progs.no_lock, true);
	bpf_program__set_autoload(skel->progs.two_regions, true);
	bpf_program__set_autoload(skel->progs.non_sleepable_1, true);
	bpf_program__set_autoload(skel->progs.non_sleepable_2, true);

	ret = rcu_read_lock__load(skel);
	if (!ASSERT_OK(ret, "skel_load"))
		goto cleanup;

	ret = rcu_read_lock__attach(skel);
	if (!ASSERT_OK(ret, "skel_attach"))
		goto cleanup;

	/* fire the fentry programs attached to sys_getpgid */
	syscall(SYS_getpgid);

	ASSERT_EQ(skel->bss->task_storage_val, 2, "task_storage_val");
	ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");
cleanup:
	rcu_read_lock__destroy(skel);
}

/* Positive test: the task_acquire prog converts an rcu pointer into a
 * refcounted one usable after unlock; load and attach must both succeed.
 */
static void test_rcuptr_acquire(void)
{
	struct rcu_read_lock *skel;
	int ret;

	skel = rcu_read_lock__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	skel->bss->target_pid = syscall(SYS_gettid);

	bpf_program__set_autoload(skel->progs.task_acquire, true);
	ret = rcu_read_lock__load(skel);
	if (ASSERT_OK(ret, "skel_load"))
		ASSERT_OK(rcu_read_lock__attach(skel), "skel_attach");
	rcu_read_lock__destroy(skel);
}

/* Programs with improperly shaped RCU read-side critical sections;
 * each must be rejected by the verifier at load time.
 */
static const char * const inproper_region_tests[] = {
	"miss_lock",
	"miss_unlock",
	"non_sleepable_rcu_mismatch",
	"inproper_sleepable_helper",
	"inproper_sleepable_kfunc",
	"nested_rcu_region",
};

/* Negative tests: load each prog from inproper_region_tests on its own
 * and assert the verifier rejects it.
 */
static void test_inproper_region(void)
{
	struct bpf_program *prog;
	struct rcu_read_lock *skel;
	int i;

	for (i = 0; i < ARRAY_SIZE(inproper_region_tests); i++) {
		skel = rcu_read_lock__open();
		if (!ASSERT_OK_PTR(skel, "skel_open"))
			return;

		prog = bpf_object__find_program_by_name(skel->obj,
							inproper_region_tests[i]);
		if (ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) {
			bpf_program__set_autoload(prog, true);
			/* load is expected to fail */
			ASSERT_ERR(rcu_read_lock__load(skel), "skel_load");
		}
		rcu_read_lock__destroy(skel);
	}
}

/* Programs that misuse an rcu-protected pointer (untrusted deref, use
 * outside/across regions); each must be rejected by the verifier.
 */
static const char * const rcuptr_misuse_tests[] = {
	"task_untrusted_non_rcuptr",
	"task_untrusted_rcuptr",
	"cross_rcu_region",
};

/* Negative tests: load each prog from rcuptr_misuse_tests on its own
 * and assert the verifier rejects it.
 */
static void test_rcuptr_misuse(void)
{
	struct bpf_program *prog;
	struct rcu_read_lock *skel;
	int i;

	for (i = 0; i < ARRAY_SIZE(rcuptr_misuse_tests); i++) {
		skel = rcu_read_lock__open();
		if (!ASSERT_OK_PTR(skel, "skel_open"))
			return;

		prog = bpf_object__find_program_by_name(skel->obj,
							rcuptr_misuse_tests[i]);
		if (ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) {
			bpf_program__set_autoload(prog, true);
			/* load is expected to fail */
			ASSERT_ERR(rcu_read_lock__load(skel), "skel_load");
		}
		rcu_read_lock__destroy(skel);
	}
}

void test_rcu_read_lock(void)
{
	struct btf *vmlinux_btf;
	int cgroup_fd;

	vmlinux_btf = btf__load_vmlinux_btf();
	if (!ASSERT_OK_PTR(vmlinux_btf, "could not load vmlinux BTF"))
		return;
	if (btf__find_by_name_kind(vmlinux_btf, "rcu", BTF_KIND_TYPE_TAG) < 0) {
		test__skip();
		goto out;
	}

	cgroup_fd = test__join_cgroup("/rcu_read_lock");
	if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /rcu_read_lock"))
		goto out;

	cgroup_id = get_cgroup_id("/rcu_read_lock");
	if (test__start_subtest("success"))
		test_success();
	if (test__start_subtest("rcuptr_acquire"))
		test_rcuptr_acquire();
	if (test__start_subtest("negative_tests_inproper_region"))
		test_inproper_region();
	if (test__start_subtest("negative_tests_rcuptr_misuse"))
		test_rcuptr_misuse();
	close(cgroup_fd);
out:
	btf__free(vmlinux_btf);
}
+290 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_tracing_net.h"
#include "bpf_misc.h"

char _license[] SEC("license") = "GPL";

/* Per-task storage map exercised by most programs below. */
struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, long);
} map_a SEC(".maps");

/* Inputs written by / results read back from the userspace harness. */
__u32 user_data, key_serial, target_pid;
__u64 flags, task_storage_val, cgroup_id;

/* kfunc prototypes, resolved against kernel BTF at load time */
struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;
void bpf_key_put(struct bpf_key *key) __ksym;
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;

/* Positive: walk the RCU-protected task->cgroups chain inside an
 * explicit RCU read-side critical section; must pass the verifier.
 */
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int get_cgroup_id(void *ctx)
{
	struct task_struct *task;

	/* only act for the harness' own thread */
	task = bpf_get_current_task_btf();
	if (task->pid != target_pid)
		return 0;

	/* simulate bpf_get_current_cgroup_id() helper */
	bpf_rcu_read_lock();
	cgroup_id = task->cgroups->dfl_cgrp->kn->id;
	bpf_rcu_read_unlock();
	return 0;
}

/* Positive: pass the rcu pointer task->real_parent to helpers inside a
 * single RCU region; writes task_storage_val (harness expects 2).
 */
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int task_succ(void *ctx)
{
	struct task_struct *task, *real_parent;
	long init_val = 2;
	long *ptr;

	task = bpf_get_current_task_btf();
	if (task->pid != target_pid)
		return 0;

	bpf_rcu_read_lock();
	/* region including helper using rcu ptr real_parent */
	real_parent = task->real_parent;
	ptr = bpf_task_storage_get(&map_a, real_parent, &init_val,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (!ptr)
		goto out;
	ptr = bpf_task_storage_get(&map_a, real_parent, 0, 0);
	if (!ptr)
		goto out;
	task_storage_val = *ptr;
out:
	bpf_rcu_read_unlock();
	return 0;
}

/* Positive: same access pattern without any bpf_rcu_read_lock();
 * pre-existing programs must keep loading successfully.
 */
SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
int no_lock(void *ctx)
{
	struct task_struct *task, *real_parent;

	/* no bpf_rcu_read_lock(), old code still works */
	task = bpf_get_current_task_btf();
	real_parent = task->real_parent;
	(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
	return 0;
}

/* Positive: two back-to-back RCU regions; the rcu pointer is both
 * obtained and used inside the second one.
 */
SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
int two_regions(void *ctx)
{
	struct task_struct *task, *real_parent;

	/* two regions */
	task = bpf_get_current_task_btf();
	bpf_rcu_read_lock();
	bpf_rcu_read_unlock();
	bpf_rcu_read_lock();
	real_parent = task->real_parent;
	(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
	bpf_rcu_read_unlock();
	return 0;
}

/* Positive: a properly paired RCU region in a non-sleepable (plain
 * fentry) program.
 */
SEC("?fentry/" SYS_PREFIX "sys_getpgid")
int non_sleepable_1(void *ctx)
{
	struct task_struct *task, *real_parent;

	task = bpf_get_current_task_btf();
	bpf_rcu_read_lock();
	real_parent = task->real_parent;
	(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
	bpf_rcu_read_unlock();
	return 0;
}

/* Positive: two separate RCU regions in a non-sleepable program;
 * real_parent is obtained and used within the same (second) region.
 */
SEC("?fentry/" SYS_PREFIX "sys_getpgid")
int non_sleepable_2(void *ctx)
{
	struct task_struct *task, *real_parent;

	bpf_rcu_read_lock();
	task = bpf_get_current_task_btf();
	bpf_rcu_read_unlock();

	bpf_rcu_read_lock();
	real_parent = task->real_parent;
	(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
	bpf_rcu_read_unlock();
	return 0;
}

/* Positive: bpf_task_acquire() turns the rcu pointer into a refcounted
 * one, so it remains usable after bpf_rcu_read_unlock().
 */
SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
int task_acquire(void *ctx)
{
	struct task_struct *task, *real_parent;

	task = bpf_get_current_task_btf();
	bpf_rcu_read_lock();
	real_parent = task->real_parent;
	/* acquire a reference which can be used outside rcu read lock region */
	real_parent = bpf_task_acquire(real_parent);
	bpf_rcu_read_unlock();
	(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
	bpf_task_release(real_parent);
	return 0;
}

/* Negative: the second bpf_rcu_read_unlock() has no matching
 * bpf_rcu_read_lock(); the verifier must reject the program.
 * (Unused locals from the original copy-paste have been dropped; they
 * generated no code, so the verifier test is unchanged.)
 */
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int miss_lock(void *ctx)
{
	struct task_struct *task;

	/* missing bpf_rcu_read_lock() for the second unlock below */
	task = bpf_get_current_task_btf();
	bpf_rcu_read_lock();
	(void)bpf_task_storage_get(&map_a, task, 0, 0);
	bpf_rcu_read_unlock();
	bpf_rcu_read_unlock();
	return 0;
}

/* Negative: program returns with the RCU read lock still held (no
 * bpf_rcu_read_unlock()); the verifier must reject it.
 * (Unused locals from the original copy-paste have been dropped; they
 * generated no code, so the verifier test is unchanged.)
 */
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int miss_unlock(void *ctx)
{
	struct task_struct *task;

	/* missing bpf_rcu_read_unlock() */
	task = bpf_get_current_task_btf();
	bpf_rcu_read_lock();
	(void)bpf_task_storage_get(&map_a, task, 0, 0);
	return 0;
}

/* Negative: the unlock is conditional, so one exit path returns with
 * the RCU lock still held; the verifier must reject the program.
 */
SEC("?fentry/" SYS_PREFIX "sys_getpgid")
int non_sleepable_rcu_mismatch(void *ctx)
{
	struct task_struct *task, *real_parent;

	task = bpf_get_current_task_btf();
	/* non-sleepable: missing bpf_rcu_read_unlock() in one path */
	bpf_rcu_read_lock();
	real_parent = task->real_parent;
	(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
	if (real_parent)
		bpf_rcu_read_unlock();
	return 0;
}

/* Negative: calls the sleepable helper bpf_copy_from_user_task()
 * inside an RCU read-side critical section; must be rejected.
 */
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int inproper_sleepable_helper(void *ctx)
{
	struct task_struct *task, *real_parent;
	struct pt_regs *regs;
	__u32 value = 0;
	void *ptr;

	task = bpf_get_current_task_btf();
	/* sleepable helper in rcu read lock region */
	bpf_rcu_read_lock();
	real_parent = task->real_parent;
	regs = (struct pt_regs *)bpf_task_pt_regs(real_parent);
	if (!regs) {
		bpf_rcu_read_unlock();
		return 0;
	}

	ptr = (void *)PT_REGS_IP(regs);
	(void)bpf_copy_from_user_task(&value, sizeof(uint32_t), ptr, task, 0);
	user_data = value;
	(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
	bpf_rcu_read_unlock();
	return 0;
}

/* Negative: calls the sleepable kfunc bpf_lookup_user_key() inside an
 * RCU read-side critical section; must be rejected.
 */
SEC("?lsm.s/bpf")
int BPF_PROG(inproper_sleepable_kfunc, int cmd, union bpf_attr *attr, unsigned int size)
{
	struct bpf_key *bkey;

	/* sleepable kfunc in rcu read lock region */
	bpf_rcu_read_lock();
	bkey = bpf_lookup_user_key(key_serial, flags);
	bpf_rcu_read_unlock();
	if (!bkey)
		return -1;
	bpf_key_put(bkey);

	return 0;
}

/* Negative: nested bpf_rcu_read_lock() regions; must be rejected. */
SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
int nested_rcu_region(void *ctx)
{
	struct task_struct *task, *real_parent;

	/* nested rcu read lock regions */
	task = bpf_get_current_task_btf();
	bpf_rcu_read_lock();
	bpf_rcu_read_lock();
	real_parent = task->real_parent;
	(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
	bpf_rcu_read_unlock();
	bpf_rcu_read_unlock();
	return 0;
}

/* Negative: last_wakee is not an rcu-tagged field, so the dereference
 * yields an untrusted pointer even inside the region; helper use with
 * it must be rejected.
 */
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int task_untrusted_non_rcuptr(void *ctx)
{
	struct task_struct *task, *last_wakee;

	task = bpf_get_current_task_btf();
	bpf_rcu_read_lock();
	/* the pointer last_wakee marked as untrusted */
	last_wakee = task->real_parent->last_wakee;
	(void)bpf_task_storage_get(&map_a, last_wakee, 0, 0);
	bpf_rcu_read_unlock();
	return 0;
}

/* Negative: the rcu pointer is passed to a helper after the RCU region
 * has ended; must be rejected.
 */
SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
int task_untrusted_rcuptr(void *ctx)
{
	struct task_struct *task, *real_parent;

	task = bpf_get_current_task_btf();
	bpf_rcu_read_lock();
	real_parent = task->real_parent;
	bpf_rcu_read_unlock();
	/* helper use of rcu ptr outside the rcu read lock region */
	(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
	return 0;
}

/* Negative: rcu pointer obtained in one RCU region but used in a later
 * one; must be rejected.
 */
SEC("?fentry.s/" SYS_PREFIX "sys_nanosleep")
int cross_rcu_region(void *ctx)
{
	struct task_struct *task, *real_parent;

	/* rcu ptr define/use in different regions */
	task = bpf_get_current_task_btf();
	bpf_rcu_read_lock();
	real_parent = task->real_parent;
	bpf_rcu_read_unlock();
	bpf_rcu_read_lock();
	(void)bpf_task_storage_get(&map_a, real_parent, 0, 0);
	bpf_rcu_read_unlock();
	return 0;
}