Commit 7c7e3d31 authored by Song Liu's avatar Song Liu Committed by Alexei Starovoitov
Browse files

bpf: Introduce helper bpf_find_vma



In some profiler use cases, it is necessary to map an address to the
backing file, e.g., a shared library. bpf_find_vma helper provides a
flexible way to achieve this. bpf_find_vma maps an address of a task to
the vma (vm_area_struct) for this address, and feeds the vma to a callback
BPF function. The callback function is necessary here, as we need to
ensure mmap_sem is unlocked.

It is necessary to lock mmap_sem for find_vma. To lock and unlock mmap_sem
safely when irqs are disabled, we use the same mechanism as stackmap with
build_id. Specifically, when irqs are disabled, the unlock is postponed
in an irq_work. Refactor stackmap.c so that the irq_work is shared among
bpf_find_vma and stackmap helpers.

Signed-off-by: default avatarSong Liu <songliubraving@fb.com>
Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
Tested-by: default avatarHengqi Chen <hengqi.chen@gmail.com>
Acked-by: default avatarYonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20211105232330.1936330-2-songliubraving@fb.com
parent 5fd79ed9
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -2157,6 +2157,7 @@ extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
extern const struct bpf_func_proto bpf_find_vma_proto;

const struct bpf_func_proto *tracing_prog_func_proto(
  enum bpf_func_id func_id, const struct bpf_prog *prog);
+20 −0
Original line number Diff line number Diff line
@@ -4938,6 +4938,25 @@ union bpf_attr {
 *		**-ENOENT** if symbol is not found.
 *
 *		**-EPERM** if caller does not have permission to obtain kernel address.
 *
 * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
 *	Description
 *		Find vma of *task* that contains *addr*, call *callback_fn*
 *		function with *task*, *vma*, and *callback_ctx*.
 *		The *callback_fn* should be a static function and
 *		the *callback_ctx* should be a pointer to the stack.
 *		The *flags* is used to control certain aspects of the helper.
 *		Currently, the *flags* must be 0.
 *
 *		The expected callback signature is
 *
 *		long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
 *
 *	Return
 *		0 on success.
 *		**-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
 *		**-EBUSY** if failed to try lock mmap_lock.
 *		**-EINVAL** for invalid **flags**.
 */
#define __BPF_FUNC_MAPPER(FN)		\
	FN(unspec),			\
@@ -5120,6 +5139,7 @@ union bpf_attr {
	FN(trace_vprintk),		\
	FN(skc_to_unix_sock),		\
	FN(kallsyms_lookup_name),	\
	FN(find_vma),			\
	/* */

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
+4 −1
Original line number Diff line number Diff line
@@ -6342,7 +6342,10 @@ const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
	.arg4_type	= ARG_ANYTHING,
};

BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct)
BTF_ID_LIST_GLOBAL(btf_task_struct_ids)
BTF_ID(struct, task_struct)
BTF_ID(struct, file)
BTF_ID(struct, vm_area_struct)

/* BTF ID set registration API for modules */

+65 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021 Facebook
 */

#ifndef __MMAP_UNLOCK_WORK_H__
#define __MMAP_UNLOCK_WORK_H__
#include <linux/irq_work.h>

/* irq_work to run mmap_read_unlock() in irq_work */
struct mmap_unlock_irq_work {
	struct irq_work irq_work;	/* deferred work that performs the unlock */
	struct mm_struct *mm;		/* mm whose mmap_lock must be released */
};

DECLARE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);

/*
 * We cannot do mmap_read_unlock() when the irq is disabled, because of
 * risk to deadlock with rq_lock. To look up vma when the irqs are
 * disabled, we need to run mmap_read_unlock() in irq_work. We use a
 * percpu variable to do the irq_work. If the irq_work is already used
 * by another lookup, we tell the caller to fall back.
 */
/*
 * Reserve the per-cpu irq_work used to defer mmap_read_unlock().
 *
 * Returns true when the caller must take its fallback path: either the
 * irq_work is already in flight for another lookup, or we are running on
 * PREEMPT_RT with irqs disabled (where mmap_lock must not be trylocked).
 * On return, *work_ptr is the irq_work to hand to bpf_mmap_unlock_mm()
 * when irqs are disabled, or NULL when a direct unlock is safe.
 */
static inline bool bpf_mmap_unlock_get_irq_work(struct mmap_unlock_irq_work **work_ptr)
{
	struct mmap_unlock_irq_work *work = NULL;
	bool busy = false;

	if (!irqs_disabled()) {
		/* Direct mmap_read_unlock() is fine; no irq_work needed. */
		*work_ptr = NULL;
		return false;
	}

	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		/*
		 * PREEMPT_RT does not allow to trylock mmap sem in
		 * interrupt disabled context. Force the fallback code.
		 */
		busy = true;
	} else {
		work = this_cpu_ptr(&mmap_unlock_work);
		/* cannot queue more up_read, fallback */
		busy = irq_work_is_busy(&work->irq_work);
	}

	*work_ptr = work;
	return busy;
}

/*
 * Release mm's mmap_lock, either directly (work == NULL) or deferred via
 * the irq_work reserved by bpf_mmap_unlock_get_irq_work().
 */
static inline void bpf_mmap_unlock_mm(struct mmap_unlock_irq_work *work, struct mm_struct *mm)
{
	if (work) {
		work->mm = mm;

		/* The lock will be released once we're out of interrupt
		 * context. Tell lockdep that we've released it now so
		 * it doesn't complain that we forgot to release it.
		 */
		rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
		irq_work_queue(&work->irq_work);
		return;
	}

	mmap_read_unlock(mm);
}

#endif /* __MMAP_UNLOCK_WORK_H__ */
+7 −73
Original line number Diff line number Diff line
@@ -7,10 +7,10 @@
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include <linux/irq_work.h>
#include <linux/btf_ids.h>
#include <linux/buildid.h>
#include "percpu_freelist.h"
#include "mmap_unlock_work.h"

#define STACK_CREATE_FLAG_MASK					\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |	\
@@ -31,25 +31,6 @@ struct bpf_stack_map {
	struct stack_map_bucket *buckets[];
};

/* irq_work to run up_read() for build_id lookup in nmi context */
struct stack_map_irq_work {
	struct irq_work irq_work;	/* deferred work running do_up_read() */
	struct mm_struct *mm;		/* mm whose mmap_lock must be released */
};

/* irq_work callback: release the mmap_lock taken during a build_id lookup. */
static void do_up_read(struct irq_work *entry)
{
	struct stack_map_irq_work *work;

	/* On PREEMPT_RT this irq_work is never queued (the lookup falls back
	 * instead), so running here would indicate a bug. */
	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
		return;

	work = container_of(entry, struct stack_map_irq_work, irq_work);
	/* The lock was acquired in a different context; the _non_owner
	 * variant releases it without tripping lockdep's owner check. */
	mmap_read_unlock_non_owner(work->mm);
}

static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work);

/* True when this stack map stores build_id+offset pairs instead of raw ips. */
static inline bool stack_map_use_build_id(struct bpf_map *map)
{
	return (map->map_flags & BPF_F_STACK_BUILD_ID);
}
@@ -149,35 +130,13 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
					  u64 *ips, u32 trace_nr, bool user)
{
	int i;
	struct mmap_unlock_irq_work *work = NULL;
	bool irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
	struct vm_area_struct *vma;
	bool irq_work_busy = false;
	struct stack_map_irq_work *work = NULL;

	if (irqs_disabled()) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			work = this_cpu_ptr(&up_read_work);
			if (irq_work_is_busy(&work->irq_work)) {
				/* cannot queue more up_read, fallback */
				irq_work_busy = true;
			}
		} else {
			/*
			 * PREEMPT_RT does not allow to trylock mmap sem in
			 * interrupt disabled context. Force the fallback code.
			 */
			irq_work_busy = true;
		}
	}

	/*
	 * We cannot do up_read() when the irq is disabled, because of
	 * risk to deadlock with rq_lock. To do build_id lookup when the
	 * irqs are disabled, we need to run up_read() in irq_work. We use
	 * a percpu variable to do the irq_work. If the irq_work is
	 * already used by another lookup, we fall back to report ips.
	 *
	 * Same fallback is used for kernel stack (!user) on a stackmap
	 * with build_id.
	/* If the irq_work is in use, fall back to report ips. Same
	 * fallback is used for kernel stack (!user) on a stackmap with
	 * build_id.
	 */
	if (!user || !current || !current->mm || irq_work_busy ||
	    !mmap_read_trylock(current->mm)) {
@@ -203,19 +162,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
			- vma->vm_start;
		id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
	}

	if (!work) {
		mmap_read_unlock(current->mm);
	} else {
		work->mm = current->mm;

		/* The lock will be released once we're out of interrupt
		 * context. Tell lockdep that we've released it now so
		 * it doesn't complain that we forgot to release it.
		 */
		rwsem_release(&current->mm->mmap_lock.dep_map, _RET_IP_);
		irq_work_queue(&work->irq_work);
	}
	bpf_mmap_unlock_mm(work, current->mm);
}

static struct perf_callchain_entry *
@@ -719,16 +666,3 @@ const struct bpf_map_ops stack_trace_map_ops = {
	.map_btf_name = "bpf_stack_map",
	.map_btf_id = &stack_trace_map_btf_id,
};

/* Boot-time setup: initialize the per-cpu irq_work items that defer
 * mmap_read_unlock() when build_id lookups run with irqs disabled.
 * Always returns 0.
 */
static int __init stack_map_init(void)
{
	int cpu;
	struct stack_map_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&up_read_work, cpu);
		init_irq_work(&work->irq_work, do_up_read);
	}
	return 0;
}
subsys_initcall(stack_map_init);
Loading