Commit cb80ddc6 authored by Alexei Starovoitov's avatar Alexei Starovoitov Committed by Daniel Borkmann
Browse files

bpf: Convert bpf_preload.ko to use light skeleton.



The main change is a move of the single line
  #include "iterators.lskel.h"
from iterators/iterators.c to bpf_preload_kern.c.
Which means that generated light skeleton can be used from user space or
user mode driver like iterators.c or from the kernel module or the kernel itself.
The direct use of light skeleton from the kernel module simplifies the code,
since UMD is no longer necessary. The libbpf.a required user space and UMD. The
CO-RE in the kernel and generated "loader bpf program" used by the light
skeleton are capable of performing complex loading operations traditionally
provided by libbpf. In addition, the UMD approach launched a UMD process
every time bpffs had to be mounted. With the light skeleton in the kernel,
the bpf_preload kernel module loads bpf iterators once and pins them
multiple times into different bpffs mounts.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Yonghong Song <yhs@fb.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220209232001.27490-6-alexei.starovoitov@gmail.com
parent d7beb3d6
Loading
Loading
Loading
Loading
+10 −29
Original line number Diff line number Diff line
@@ -710,11 +710,10 @@ static DEFINE_MUTEX(bpf_preload_lock);
static int populate_bpffs(struct dentry *parent)
{
	struct bpf_preload_info objs[BPF_PRELOAD_LINKS] = {};
	struct bpf_link *links[BPF_PRELOAD_LINKS] = {};
	int err = 0, i;

	/* grab the mutex to make sure the kernel interactions with bpf_preload
	 * UMD are serialized
	 * are serialized
	 */
	mutex_lock(&bpf_preload_lock);

@@ -722,40 +721,22 @@ static int populate_bpffs(struct dentry *parent)
	if (!bpf_preload_mod_get())
		goto out;

	if (!bpf_preload_ops->info.tgid) {
		/* preload() will start UMD that will load BPF iterator programs */
	err = bpf_preload_ops->preload(objs);
	if (err)
		goto out_put;
	for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
			links[i] = bpf_link_by_id(objs[i].link_id);
			if (IS_ERR(links[i])) {
				err = PTR_ERR(links[i]);
				goto out_put;
			}
		}
		for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
		bpf_link_inc(objs[i].link);
		err = bpf_iter_link_pin_kernel(parent,
						       objs[i].link_name, links[i]);
			if (err)
					       objs[i].link_name, objs[i].link);
		if (err) {
			bpf_link_put(objs[i].link);
			goto out_put;
			/* do not unlink successfully pinned links even
			 * if later link fails to pin
			 */
			links[i] = NULL;
		}
		/* finish() will tell UMD process to exit */
		err = bpf_preload_ops->finish();
		if (err)
			goto out_put;
	}
out_put:
	bpf_preload_mod_put();
out:
	mutex_unlock(&bpf_preload_lock);
	for (i = 0; i < BPF_PRELOAD_LINKS && err; i++)
		if (!IS_ERR_OR_NULL(links[i]))
			bpf_link_put(links[i]);
	return err;
}

+3 −4
Original line number Diff line number Diff line
@@ -18,10 +18,9 @@ menuconfig BPF_PRELOAD

if BPF_PRELOAD
config BPF_PRELOAD_UMD
	tristate "bpf_preload kernel module with user mode driver"
	depends on CC_CAN_LINK
	depends on m || CC_CAN_LINK_STATIC
	tristate "bpf_preload kernel module"
	default m
	help
	  This builds bpf_preload kernel module with embedded user mode driver.
	  This builds bpf_preload kernel module with embedded BPF programs for
	  introspection in bpffs.
endif
+2 −12
Original line number Diff line number Diff line
@@ -3,16 +3,6 @@
LIBBPF_SRCS = $(srctree)/tools/lib/bpf/
LIBBPF_INCLUDE = $(LIBBPF_SRCS)/..

userccflags += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi \
	-I $(LIBBPF_INCLUDE) -Wno-unused-result

userprogs := bpf_preload_umd

bpf_preload_umd-objs := iterators/iterators.o

$(obj)/bpf_preload_umd:

$(obj)/bpf_preload_umd_blob.o: $(obj)/bpf_preload_umd

obj-$(CONFIG_BPF_PRELOAD_UMD) += bpf_preload.o
bpf_preload-objs += bpf_preload_kern.o bpf_preload_umd_blob.o
CFLAGS_bpf_preload_kern.o += -I $(LIBBPF_INCLUDE)
bpf_preload-objs += bpf_preload_kern.o
+4 −4
Original line number Diff line number Diff line
@@ -2,13 +2,13 @@
#ifndef _BPF_PRELOAD_H
#define _BPF_PRELOAD_H

#include <linux/usermode_driver.h>
#include "iterators/bpf_preload_common.h"
struct bpf_preload_info {
	char link_name[16];
	struct bpf_link *link;
};

struct bpf_preload_ops {
        struct umd_info info;
	int (*preload)(struct bpf_preload_info *);
	int (*finish)(void);
	struct module *owner;
};
extern struct bpf_preload_ops *bpf_preload_ops;
+49 −70
Original line number Diff line number Diff line
@@ -2,101 +2,80 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include "bpf_preload.h"
#include "iterators/iterators.lskel.h"

extern char bpf_preload_umd_start;
extern char bpf_preload_umd_end;
static struct bpf_link *maps_link, *progs_link;
static struct iterators_bpf *skel;

static int preload(struct bpf_preload_info *obj);
static int finish(void);
static void free_links_and_skel(void)
{
	if (!IS_ERR_OR_NULL(maps_link))
		bpf_link_put(maps_link);
	if (!IS_ERR_OR_NULL(progs_link))
		bpf_link_put(progs_link);
	iterators_bpf__destroy(skel);
}

static int preload(struct bpf_preload_info *obj)
{
	strlcpy(obj[0].link_name, "maps.debug", sizeof(obj[0].link_name));
	obj[0].link = maps_link;
	strlcpy(obj[1].link_name, "progs.debug", sizeof(obj[1].link_name));
	obj[1].link = progs_link;
	return 0;
}

static struct bpf_preload_ops umd_ops = {
	.info.driver_name = "bpf_preload",
static struct bpf_preload_ops ops = {
	.preload = preload,
	.finish = finish,
	.owner = THIS_MODULE,
};

static int preload(struct bpf_preload_info *obj)
static int load_skel(void)
{
	int magic = BPF_PRELOAD_START;
	loff_t pos = 0;
	int i, err;
	ssize_t n;
	int err;

	err = fork_usermode_driver(&umd_ops.info);
	skel = iterators_bpf__open();
	if (!skel)
		return -ENOMEM;
	err = iterators_bpf__load(skel);
	if (err)
		return err;

	/* send the start magic to let UMD proceed with loading BPF progs */
	n = kernel_write(umd_ops.info.pipe_to_umh,
			 &magic, sizeof(magic), &pos);
	if (n != sizeof(magic))
		return -EPIPE;

	/* receive bpf_link IDs and names from UMD */
	pos = 0;
	for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
		n = kernel_read(umd_ops.info.pipe_from_umh,
				&obj[i], sizeof(*obj), &pos);
		if (n != sizeof(*obj))
			return -EPIPE;
	}
	return 0;
		goto out;
	err = iterators_bpf__attach(skel);
	if (err)
		goto out;
	maps_link = bpf_link_get_from_fd(skel->links.dump_bpf_map_fd);
	if (IS_ERR(maps_link)) {
		err = PTR_ERR(maps_link);
		goto out;
	}

static int finish(void)
{
	int magic = BPF_PRELOAD_END;
	struct pid *tgid;
	loff_t pos = 0;
	ssize_t n;

	/* send the last magic to UMD. It will do a normal exit. */
	n = kernel_write(umd_ops.info.pipe_to_umh,
			 &magic, sizeof(magic), &pos);
	if (n != sizeof(magic))
		return -EPIPE;

	tgid = umd_ops.info.tgid;
	if (tgid) {
		wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
		umd_cleanup_helper(&umd_ops.info);
	progs_link = bpf_link_get_from_fd(skel->links.dump_bpf_prog_fd);
	if (IS_ERR(progs_link)) {
		err = PTR_ERR(progs_link);
		goto out;
	}
	return 0;
out:
	free_links_and_skel();
	return err;
}

static int __init load_umd(void)
static int __init load(void)
{
	int err;

	err = umd_load_blob(&umd_ops.info, &bpf_preload_umd_start,
			    &bpf_preload_umd_end - &bpf_preload_umd_start);
	err = load_skel();
	if (err)
		return err;
	bpf_preload_ops = &umd_ops;
	bpf_preload_ops = &ops;
	return err;
}

static void __exit fini_umd(void)
static void __exit fini(void)
{
	struct pid *tgid;

	bpf_preload_ops = NULL;

	/* kill UMD in case it's still there due to earlier error */
	tgid = umd_ops.info.tgid;
	if (tgid) {
		kill_pid(tgid, SIGKILL, 1);

		wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
		umd_cleanup_helper(&umd_ops.info);
	}
	umd_unload_blob(&umd_ops.info);
	free_links_and_skel();
}
late_initcall(load_umd);
module_exit(fini_umd);
late_initcall(load);
module_exit(fini);
MODULE_LICENSE("GPL");
Loading