Commit fdf45787 authored by Alexei Starovoitov
Browse files

Merge branch 'bpf: Avoid unnecessary deadlock detection and failure in task storage'



Martin KaFai Lau says:

====================

From: Martin KaFai Lau <martin.lau@kernel.org>

The commit bc235cdb ("bpf: Prevent deadlock from recursive bpf_task_storage_[get|delete]")
added deadlock detection to prevent a tracing program from recursively
calling the bpf_task_storage_{get,delete}() helpers.  These helpers acquire
a spin lock, so recursion would lead to deadlock.

This detection is unnecessary for bpf_lsm and bpf_iter programs, which do
not recur.  The situation is the same as for the existing
bpf_pid_task_storage_{lookup,delete}_elem(), which are used in the
syscall path and also do not have deadlock detection.

This set is to add new bpf_task_storage_{get,delete}() helper proto
without the deadlock detection.  The set also removes the prog->active
check from the bpf_lsm and bpf_iter program.  Please see the individual
patch for details.
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents f3c51fe0 387b5321
Loading
Loading
Loading
Loading
+2 −7
Original line number Diff line number Diff line
@@ -1649,13 +1649,8 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
	struct bpf_prog *p = l->link.prog;
	int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);

	if (p->aux->sleepable) {
		enter_prog = (u64)__bpf_prog_enter_sleepable;
		exit_prog = (u64)__bpf_prog_exit_sleepable;
	} else {
		enter_prog = (u64)__bpf_prog_enter;
		exit_prog = (u64)__bpf_prog_exit;
	}
	enter_prog = (u64)bpf_trampoline_enter(p);
	exit_prog = (u64)bpf_trampoline_exit(p);

	if (l->cookie == 0) {
		/* if cookie is zero, one instruction is enough to store it */
+2 −17
Original line number Diff line number Diff line
@@ -1894,10 +1894,6 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
			   struct bpf_tramp_link *l, int stack_size,
			   int run_ctx_off, bool save_ret)
{
	void (*exit)(struct bpf_prog *prog, u64 start,
		     struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_exit;
	u64 (*enter)(struct bpf_prog *prog,
		     struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_enter;
	u8 *prog = *pprog;
	u8 *jmp_insn;
	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
@@ -1916,23 +1912,12 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
	 */
	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);

	if (p->aux->sleepable) {
		enter = __bpf_prog_enter_sleepable;
		exit = __bpf_prog_exit_sleepable;
	} else if (p->type == BPF_PROG_TYPE_STRUCT_OPS) {
		enter = __bpf_prog_enter_struct_ops;
		exit = __bpf_prog_exit_struct_ops;
	} else if (p->expected_attach_type == BPF_LSM_CGROUP) {
		enter = __bpf_prog_enter_lsm_cgroup;
		exit = __bpf_prog_exit_lsm_cgroup;
	}

	/* arg1: mov rdi, progs[i] */
	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
	/* arg2: lea rsi, [rbp - ctx_cookie_off] */
	EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);

	if (emit_call(&prog, enter, prog))
	if (emit_call(&prog, bpf_trampoline_enter(p), prog))
		return -EINVAL;
	/* remember prog start time returned by __bpf_prog_enter */
	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
@@ -1977,7 +1962,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
	/* arg3: lea rdx, [rbp - run_ctx_off] */
	EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
	if (emit_call(&prog, exit, prog))
	if (emit_call(&prog, bpf_trampoline_exit(p), prog))
		return -EINVAL;

	*pprog = prog;
+12 −14
Original line number Diff line number Diff line
@@ -854,22 +854,18 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *i
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
				void *orig_call);
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx);
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
					     struct bpf_tramp_run_ctx *run_ctx);
u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
					struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
					struct bpf_tramp_run_ctx *run_ctx);
u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
					struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_prog_exit_struct_ops(struct bpf_prog *prog, u64 start,
void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
					     struct bpf_tramp_run_ctx *run_ctx);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
				      struct bpf_tramp_run_ctx *run_ctx);
typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
				      struct bpf_tramp_run_ctx *run_ctx);
bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);

struct bpf_ksym {
	unsigned long		 start;
@@ -2523,7 +2519,9 @@ extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
extern const struct bpf_func_proto bpf_sock_from_file_proto;
extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_recur_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
+14 −1
Original line number Diff line number Diff line
@@ -642,10 +642,23 @@ static inline u32 type_flag(u32 type)
}

/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
{
	return prog->type == BPF_PROG_TYPE_EXT ?
		prog->aux->dst_prog->type : prog->type;
}

static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
{
	switch (resolve_prog_type(prog)) {
	case BPF_PROG_TYPE_TRACING:
		return prog->expected_attach_type != BPF_TRACE_ITER;
	case BPF_PROG_TYPE_STRUCT_OPS:
	case BPF_PROG_TYPE_LSM:
		return false;
	default:
		return true;
	}
}

#endif /* _LINUX_BPF_VERIFIER_H */
+1 −0
Original line number Diff line number Diff line
@@ -242,6 +242,7 @@ void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu)
	__bpf_selem_unlink_storage(selem, use_trace_rcu);
}

/* If cacheit_lockit is false, this lookup function is lockless */
struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
Loading