Commit fbe8870f authored by Jakub Kicinski
Daniel Borkmann says:

====================
bpf 2022-08-10

We've added 23 non-merge commits during the last 7 day(s) which contain
a total of 19 files changed, 424 insertions(+), 35 deletions(-).

The main changes are:

1) Several fixes for the BPF map iterator, such as UAFs, along with selftests, from Hou Tao.

2) Fix BPF syscall program's {copy,strncpy}_from_bpfptr() to not fault, from Jinghao Jia.

3) Reject BPF syscall programs calling BPF_PROG_RUN, from Alexei Starovoitov and YiFei Zhu.

4) Fix attach_btf_obj_id info to pick proper target BTF, from Stanislav Fomichev.

5) BPF design Q/A doc update to clarify what is not a stable ABI, from Paul E. McKenney.

6) Fix BPF map's prealloc_lru_pop to not reinitialize, from Kumar Kartikeya Dwivedi.

7) Fix bpf_trampoline_put to avoid leaking ftrace hash, from Jiri Olsa.

8) Fix arm64 JIT to address sparse errors around BPF trampoline, from Xu Kuohai.

9) Fix arm64 JIT to use kvcalloc instead of kcalloc for the internal program
   address offset buffer, from Aijun Sun.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf: (23 commits)
  selftests/bpf: Ensure sleepable program is rejected by hash map iter
  selftests/bpf: Add write tests for sk local storage map iterator
  selftests/bpf: Add tests for reading a dangling map iter fd
  bpf: Only allow sleepable program for resched-able iterator
  bpf: Check the validity of max_rdwr_access for sock local storage map iterator
  bpf: Acquire map uref in .init_seq_private for sock{map,hash} iterator
  bpf: Acquire map uref in .init_seq_private for sock local storage map iterator
  bpf: Acquire map uref in .init_seq_private for hash map iterator
  bpf: Acquire map uref in .init_seq_private for array map iterator
  bpf: Disallow bpf programs call prog_run command.
  bpf, arm64: Fix bpf trampoline instruction endianness
  selftests/bpf: Add test for prealloc_lru_pop bug
  bpf: Don't reinit map value in prealloc_lru_pop
  bpf: Allow calling bpf_prog_test kfuncs in tracing programs
  bpf, arm64: Allocate program buffer using kvcalloc instead of kcalloc
  selftests/bpf: Excercise bpf_obj_get_info_by_fd for bpf2bpf
  bpf: Use proper target btf when exporting attach_btf_obj_id
  mptcp, btf: Add struct mptcp_sock definition when CONFIG_MPTCP is disabled
  bpf: Cleanup ftrace hash in bpf_trampoline_put
  BPF: Fix potential bad pointer dereference in bpf_sys_bpf()
  ...
====================

Link: https://lore.kernel.org/r/20220810190624.10748-1-daniel@iogearbox.net


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents dd48f383 e7c677bd
Documentation/bpf/bpf_design_QA.rst  +25 −0
@@ -214,6 +214,12 @@ A: NO. Tracepoints are tied to internal implementation details hence they are
subject to change and can break with newer kernels. BPF programs need to change
accordingly when this happens.

+Q: Are places where kprobes can attach part of the stable ABI?
+--------------------------------------------------------------
+A: NO. The places to which kprobes can attach are internal implementation
+details, which means that they are subject to change and can break with
+newer kernels. BPF programs need to change accordingly when this happens.
+
Q: How much stack space a BPF program uses?
-------------------------------------------
A: Currently all program types are limited to 512 bytes of stack
@@ -273,3 +279,22 @@ cc (congestion-control) implementations. If any of these kernel
functions has changed, both the in-tree and out-of-tree kernel tcp cc
implementations have to be changed.  The same goes for the bpf
programs and they have to be adjusted accordingly.
+
+Q: Attaching to arbitrary kernel functions is an ABI?
+-----------------------------------------------------
+Q: BPF programs can be attached to many kernel functions.  Do these
+kernel functions become part of the ABI?
+
+A: NO.
+
+The kernel function prototypes will change, and BPF programs attaching to
+them will need to change.  The BPF compile-once-run-everywhere (CO-RE)
+should be used in order to make it easier to adapt your BPF programs to
+different versions of the kernel.
+
+Q: Marking a function with BTF_ID makes that function an ABI?
+-------------------------------------------------------------
+A: NO.
+
+The BTF_ID macro does not cause a function to become part of the ABI
+any more than does the EXPORT_SYMBOL_GPL macro.
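
The CO-RE advice above is the load-bearing point: instead of hardcoding struct
offsets, a program records relocations against the running kernel's BTF. A
minimal sketch of that technique (not part of this series; assumes libbpf and
a generated vmlinux.h):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

SEC("kprobe/do_nanosleep")
int trace_sleep(struct pt_regs *ctx)
{
	struct task_struct *task = (void *)bpf_get_current_task();
	pid_t pid;

	/* The offset of ->pid is relocated at load time against the
	 * running kernel's BTF, so the program survives struct layout
	 * changes without a rebuild. */
	pid = BPF_CORE_READ(task, pid);
	bpf_printk("sleeper pid=%d", pid);
	return 0;
}
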
arch/arm64/net/bpf_jit_comp.c  +8 −8
@@ -1496,7 +1496,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

-	ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
+	ctx.offset = kvcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out_off;
@@ -1601,7 +1601,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
			ctx.offset[i] *= AARCH64_INSN_SIZE;
		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
out_off:
-		kfree(ctx.offset);
+		kvfree(ctx.offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
@@ -1643,7 +1643,7 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
			    int args_off, int retval_off, int run_ctx_off,
			    bool save_ret)
{
-	u32 *branch;
+	__le32 *branch;
	u64 enter_prog;
	u64 exit_prog;
	struct bpf_prog *p = l->link.prog;
@@ -1698,7 +1698,7 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,

	if (ctx->image) {
		int offset = &ctx->image[ctx->idx] - branch;
-		*branch = A64_CBZ(1, A64_R(0), offset);
+		*branch = cpu_to_le32(A64_CBZ(1, A64_R(0), offset));
	}

	/* arg1: prog */
@@ -1713,7 +1713,7 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,

static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl,
			       int args_off, int retval_off, int run_ctx_off,
-			       u32 **branches)
+			       __le32 **branches)
{
	int i;

@@ -1784,7 +1784,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
	bool save_ret;
-	u32 **branches = NULL;
+	__le32 **branches = NULL;

	/* trampoline stack layout:
	 *                  [ parent ip         ]
@@ -1892,7 +1892,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
				flags & BPF_TRAMP_F_RET_FENTRY_RET);

	if (fmod_ret->nr_links) {
-		branches = kcalloc(fmod_ret->nr_links, sizeof(u32 *),
+		branches = kcalloc(fmod_ret->nr_links, sizeof(__le32 *),
				   GFP_KERNEL);
		if (!branches)
			return -ENOMEM;
@@ -1916,7 +1916,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
	/* update the branches saved in invoke_bpf_mod_ret with cbnz */
	for (i = 0; i < fmod_ret->nr_links && ctx->image != NULL; i++) {
		int offset = &ctx->image[ctx->idx] - branches[i];
-		*branches[i] = A64_CBNZ(1, A64_R(10), offset);
+		*branches[i] = cpu_to_le32(A64_CBNZ(1, A64_R(10), offset));
	}

	for (i = 0; i < fexit->nr_links; i++)
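
For context on this endianness fix: AArch64 instructions are always stored
little-endian in memory, even when the kernel itself runs big-endian, so every
word the JIT writes into the image must go through cpu_to_le32(). Typing the
buffers as __le32 lets sparse flag any store that misses the conversion. A
simplified sketch of the emission pattern (a hypothetical helper, not the
kernel's exact one):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Convert each 32-bit instruction to instruction endianness (always LE
 * on arm64) before storing it in the JIT image. With a plain u32 image,
 * sparse cannot catch a missing conversion; with __le32 it can. */
static inline void emit_insn(__le32 *image, int *idx, u32 insn)
{
	if (image)
		image[*idx] = cpu_to_le32(insn);
	(*idx)++;
}
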
include/linux/bpfptr.h  +6 −2
@@ -49,7 +49,9 @@ static inline void bpfptr_add(bpfptr_t *bpfptr, size_t val)
static inline int copy_from_bpfptr_offset(void *dst, bpfptr_t src,
					  size_t offset, size_t size)
{
-	return copy_from_sockptr_offset(dst, (sockptr_t) src, offset, size);
+	if (!bpfptr_is_kernel(src))
+		return copy_from_user(dst, src.user + offset, size);
+	return copy_from_kernel_nofault(dst, src.kernel + offset, size);
}

static inline int copy_from_bpfptr(void *dst, bpfptr_t src, size_t size)
@@ -78,7 +80,9 @@ static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len)

static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count)
{
-	return strncpy_from_sockptr(dst, (sockptr_t) src, count);
+	if (bpfptr_is_kernel(src))
+		return strncpy_from_kernel_nofault(dst, src.kernel, count);
+	return strncpy_from_user(dst, src.user, count);
}

#endif /* _LINUX_BPFPTR_H */
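
The casts to sockptr_t removed above were the bug: the kernel-pointer path of
the sockptr helpers copies with an ordinary memcpy(), so an unverified kernel
address coming from a BPF syscall program could oops instead of failing. The
_nofault variants disable page-fault handling and return an error instead. An
illustrative caller (a hypothetical function, not from this series):

#include <linux/bpfptr.h>

/* Read a NUL-terminated name through a bpfptr_t. After the fix, a bad
 * kernel address surfaces here as a negative return (e.g. -EFAULT)
 * rather than an unhandled fault inside memcpy(). */
static long read_name(char *dst, size_t len, bpfptr_t src)
{
	long n = strncpy_from_bpfptr(dst, src, len);

	if (n < 0)
		return n;
	dst[len - 1] = '\0';
	return 0;
}
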
include/net/mptcp.h  +4 −0
@@ -291,4 +291,8 @@ struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk);
static inline struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk) { return NULL; }
#endif

+#if !IS_ENABLED(CONFIG_MPTCP)
+struct mptcp_sock { };
+#endif
+
#endif /* __NET_MPTCP_H */
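
The empty stub exists because build-time BTF ID resolution needs the type to
be defined somewhere, not merely declared: the sock-casting helpers reference
struct mptcp_sock by BTF ID, and resolution fails on a CONFIG_MPTCP=n build if
the struct has no definition at all. An illustrative ID list (demo name; the
real lists live in the BPF core):

#include <linux/btf_ids.h>

/* resolve_btfids patches these IDs at build time; it can only find
 * 'struct mptcp_sock' if some translation unit defines the type, which
 * the empty stub above guarantees even with CONFIG_MPTCP disabled. */
BTF_ID_LIST(demo_btf_ids)
BTF_ID(struct, mptcp_sock)
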
kernel/bpf/arraymap.c  +6 −0
@@ -649,6 +649,11 @@ static int bpf_iter_init_array_map(void *priv_data,
		seq_info->percpu_value_buf = value_buf;
	}

+	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
+	 * released before or in the middle of iterating map elements, so
+	 * acquire an extra map uref for iterator.
+	 */
+	bpf_map_inc_with_uref(map);
	seq_info->map = map;
	return 0;
}
@@ -657,6 +662,7 @@ static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

+	bpf_map_put_with_uref(seq_info->map);
	kfree(seq_info->percpu_value_buf);
}

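The lifetime problem these uref changes close looks roughly like the following
user-space sequence (a hypothetical sketch; assumes libbpf and a prog_fd for a
loaded map-iterator program, with error handling elided). Before this series,
releasing the link and map while the iterator fd stayed open could leave the
seq_file walking a freed map:

#include <unistd.h>
#include <bpf/bpf.h>

static void iterate_after_close(int prog_fd, int map_fd)
{
	union bpf_iter_link_info linfo = { .map.map_fd = map_fd };
	LIBBPF_OPTS(bpf_link_create_opts, opts,
		    .iter_info = &linfo,
		    .iter_info_len = sizeof(linfo));
	char buf[4096];
	int link_fd, iter_fd;

	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_ITER, &opts);
	iter_fd = bpf_iter_create(link_fd);

	/* Drop the creator's references; only the iterator fd remains. */
	close(link_fd);
	close(map_fd);

	/* Safe only because .init_seq_private now holds its own map uref;
	 * previously this read could touch a map whose urefs were gone. */
	while (read(iter_fd, buf, sizeof(buf)) > 0)
		;
	close(iter_fd);
}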