Commit 58d84bee authored by Yonghong Song's avatar Yonghong Song Committed by Alexei Starovoitov
Browse files

bpf: Add type cast unit tests

Three tests are added. One is from John Fastabend ([1]) which tests
tracing style access for xdp program from the kernel ctx.
Another is a tc test to test both kernel ctx tracing style access
and explicit non-ctx type cast. The third one is for negative tests
including two tests, a tp_bpf test where the bpf_rdonly_cast()
returns an untrusted ptr which cannot be used as helper argument,
and a tracepoint test where the kernel ctx is a u64.

Also added the test to DENYLIST.s390x since s390 does not currently
support calling kernel functions in JIT mode.

  [1] https://lore.kernel.org/bpf/20221109215242.1279993-1-john.fastabend@gmail.com/



Signed-off-by: default avatarYonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/r/20221120195442.3114844-1-yhs@fb.com


Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
parent a35b9af4
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -71,6 +71,7 @@ trace_printk # trace_printk__load unexpected error:
trace_vprintk                            # trace_vprintk__open_and_load unexpected error: -9                           (?)
tracing_struct                           # failed to auto-attach: -524                                                 (trampoline)
trampoline_count                         # prog 'prog1': failed to attach: ERROR: strerror_r(-524)=22                  (trampoline)
type_cast                                # JIT does not support calling kernel function
unpriv_bpf_disabled                      # fentry
user_ringbuf                             # failed to find kernel BTF type ID of '__s390x_sys_prctl': -3                (?)
verif_stats                              # trace_vprintk__open_and_load unexpected error: -9                           (?)
+114 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include <network_helpers.h>
#include "type_cast.skel.h"

/* Load only the md_xdp program, run it via BPF_PROG_TEST_RUN on an IPv4
 * packet, and verify the net_device fields the program read through the
 * kernel ctx (expected values are those of the loopback device).
 */
static void test_xdp(void)
{
	int fd, ret;
	struct type_cast *skel;
	char data_out[128];

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.data_out = data_out,
		.data_size_out = sizeof(data_out),
		.repeat = 1,
	);

	skel = type_cast__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* Only this subtest's program is loaded; the rest stay disabled. */
	bpf_program__set_autoload(skel->progs.md_xdp, true);
	ret = type_cast__load(skel);
	if (!ASSERT_OK(ret, "skel_load"))
		goto cleanup;

	fd = bpf_program__fd(skel->progs.md_xdp);
	ret = bpf_prog_test_run_opts(fd, &topts);
	ASSERT_OK(ret, "test_run");
	ASSERT_EQ(topts.retval, XDP_PASS, "xdp test_run retval");

	/* test_run runs against loopback: ifindex 1, device name "lo". */
	ASSERT_EQ(skel->bss->ifindex, 1, "xdp_md ifindex");
	ASSERT_EQ(skel->bss->ifindex, skel->bss->ingress_ifindex, "xdp_md ingress_ifindex");
	ASSERT_STREQ(skel->bss->name, "lo", "xdp_md name");
	ASSERT_NEQ(skel->bss->inum, 0, "xdp_md inum");

cleanup:
	type_cast__destroy(skel);
}

/* Load only the md_skb program, run it once on an IPv4 packet, and
 * verify the sk_buff fields read via bpf_cast_to_kern_ctx() and the
 * skb_shared_info fields read via bpf_rdonly_cast().
 */
static void test_tc(void)
{
	int fd, ret;
	struct type_cast *skel;

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	skel = type_cast__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* Only this subtest's program is loaded; the rest stay disabled. */
	bpf_program__set_autoload(skel->progs.md_skb, true);
	ret = type_cast__load(skel);
	if (!ASSERT_OK(ret, "skel_load"))
		goto cleanup;

	fd = bpf_program__fd(skel->progs.md_skb);
	ret = bpf_prog_test_run_opts(fd, &topts);
	ASSERT_OK(ret, "test_run");
	ASSERT_EQ(topts.retval, 0, "tc test_run retval");

	/* A plain test_run skb carries no metadata and no frag_list. */
	ASSERT_EQ(skel->bss->meta_len, 0, "skb meta_len");
	ASSERT_EQ(skel->bss->frag0_len, 0, "skb frag0_len");
	/* Casting kskb back through bpf_rdonly_cast() must see same data. */
	ASSERT_NEQ(skel->bss->kskb_len, 0, "skb len");
	ASSERT_NEQ(skel->bss->kskb2_len, 0, "skb2 len");
	ASSERT_EQ(skel->bss->kskb_len, skel->bss->kskb2_len, "skb len compare");

cleanup:
	type_cast__destroy(skel);
}

/* Names of programs in the type_cast BPF object that are expected to be
 * REJECTED by the verifier when loaded (see test_negative()).
 */
static const char * const negative_tests[] = {
	"untrusted_ptr",
	"kctx_u64",
};

static void test_negative(void)
{
	struct bpf_program *prog;
	struct type_cast *skel;
	int i, err;

	for (i = 0; i < ARRAY_SIZE(negative_tests); i++) {
		skel = type_cast__open();
		if (!ASSERT_OK_PTR(skel, "skel_open"))
			return;

		prog = bpf_object__find_program_by_name(skel->obj, negative_tests[i]);
		if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
			goto out;
		bpf_program__set_autoload(prog, true);
		err = type_cast__load(skel);
		ASSERT_ERR(err, "skel_load");
out:
		type_cast__destroy(skel);
	}
}

void test_type_cast(void)
{
	if (test__start_subtest("xdp"))
		test_xdp();
	if (test__start_subtest("tc"))
		test_tc();
	if (test__start_subtest("negative"))
		test_negative();
}
+83 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

/* Task-local storage map; used only as the map argument to
 * bpf_task_storage_get() in the untrusted_ptr negative test below.
 */
struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, long);
} enter_id SEC(".maps");

#define	IFNAMSIZ 16

/* Results written by md_xdp, checked from userspace via skel->bss. */
int ifindex, ingress_ifindex;
char name[IFNAMSIZ];
unsigned int inum;
/* Results written by md_skb, checked from userspace via skel->bss. */
unsigned int meta_len, frag0_len, kskb_len, kskb2_len;

/* Kfunc declarations; resolved against kernel BTF at load time. */
void *bpf_cast_to_kern_ctx(void *) __ksym;
void *bpf_rdonly_cast(void *, __u32) __ksym;

/* Cast the xdp_md user ctx to the kernel struct xdp_buff and walk BTF
 * pointers (rxq->dev) tracing-style; export device fields for the
 * userspace test to check.
 */
SEC("?xdp")
int md_xdp(struct xdp_md *ctx)
{
	struct xdp_buff *kctx = bpf_cast_to_kern_ctx(ctx);
	struct net_device *dev;

	dev = kctx->rxq->dev;
	ifindex = dev->ifindex;
	/* network-namespace inode number of the receiving device */
	inum = dev->nd_net.net->ns.inum;
	__builtin_memcpy(name, dev->name, IFNAMSIZ);
	/* same field via the regular user ctx, for cross-checking */
	ingress_ifindex = ctx->ingress_ifindex;
	return XDP_PASS;
}

/* Exercise both casts from a tc program: bpf_cast_to_kern_ctx() on the
 * skb ctx, and explicit bpf_rdonly_cast() of a raw pointer to a typed
 * read-only kernel struct.
 */
SEC("?tc")
int md_skb(struct __sk_buff *skb)
{
	struct sk_buff *kskb = bpf_cast_to_kern_ctx(skb);
	struct skb_shared_info *shared_info;
	struct sk_buff *kskb2;

	kskb_len = kskb->len;

	/* Simulate the following kernel macro:
	 *   #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
	 */
	shared_info = bpf_rdonly_cast(kskb->head + kskb->end,
		bpf_core_type_id_kernel(struct skb_shared_info));
	meta_len = shared_info->meta_len;
	frag0_len = shared_info->frag_list->len;

	/* kskb2 should be equal to kskb */
	kskb2 = bpf_rdonly_cast(kskb, bpf_core_type_id_kernel(struct sk_buff));
	kskb2_len = kskb2->len;
	return 0;
}

/* Negative test: bpf_rdonly_cast() yields an untrusted pointer, which
 * must be rejected when passed as a helper argument that requires a
 * trusted task pointer (bpf_task_storage_get()). The verifier is
 * expected to FAIL loading this program; test_negative() asserts that.
 */
SEC("?tp_btf/sys_enter")
int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id)
{
	struct task_struct *task, *task_dup;

	task = bpf_get_current_task_btf();
	task_dup = bpf_rdonly_cast(task, bpf_core_type_id_kernel(struct task_struct));
	/* task_dup is untrusted; this helper call triggers the reject. */
	(void)bpf_task_storage_get(&enter_id, task_dup, 0, 0);
	return 0;
}

/* Negative test: for a plain (non-BTF) tracepoint program the kernel
 * ctx is a u64, so casting it with bpf_rdonly_cast() here is invalid
 * and the verifier is expected to FAIL loading this program.
 */
SEC("?tracepoint/syscalls/sys_enter_nanosleep")
int kctx_u64(void *ctx)
{
	u64 *kctx = bpf_rdonly_cast(ctx, bpf_core_type_id_kernel(u64));

	(void)kctx;
	return 0;
}

/* License declaration required by the kernel for this BPF object. */
char _license[] SEC("license") = "GPL";