Commit c1754bf0 authored by Jakub Kicinski
Browse files
Andrii Nakryiko says:

====================
bpf 2022-11-11

We've added 11 non-merge commits during the last 8 day(s) which contain
a total of 11 files changed, 83 insertions(+), 74 deletions(-).

The main changes are:

1) Fix strncpy_from_kernel_nofault() to prevent out-of-bounds writes,
   from Alban Crequy.

2) Fix for bpf_prog_test_run_skb() to prevent wrong alignment,
   from Baisong Zhong.

3) Switch BPF_DISPATCHER to static_call() instead of ftrace infra, with
   a small build fix on top, from Peter Zijlstra and Nathan Chancellor.

4) Fix memory leak in BPF verifier in some error cases, from Wang Yufen.

5) 32-bit compilation error fixes for BPF selftests, from Pu Lehui and
   Yang Jihong.

6) Ensure even distribution of per-CPU free list elements, from Xu Kuohai.

7) Fix copy_map_value() to track special zeroed out areas properly,
   from Xu Kuohai.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf: Fix offset calculation error in __copy_map_value and zero_map_value
  bpf: Initialize same number of free nodes for each pcpu_freelist
  selftests: bpf: Add a test when bpf_probe_read_kernel_str() returns EFAULT
  maccess: Fix writing offset in case of fault in strncpy_from_kernel_nofault()
  selftests/bpf: Fix test_progs compilation failure in 32-bit arch
  selftests/bpf: Fix casting error when cross-compiling test_verifier for 32-bit platforms
  bpf: Fix memory leaks in __check_func_call
  bpf: Add explicit cast to 'void *' for __BPF_DISPATCHER_UPDATE()
  bpf: Convert BPF_DISPATCHER to use static_call() (not ftrace)
  bpf: Revert ("Fix dispatcher patchable function entry to 5 bytes nop")
  bpf, test_run: Fix alignment problem in bpf_prog_test_run_skb()
====================

Link: https://lore.kernel.org/r/20221111231624.938829-1-andrii@kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents f3a72878 1f6e04a1
Loading
Loading
Loading
Loading
+0 −13
Original line number Diff line number Diff line
@@ -11,7 +11,6 @@
#include <linux/bpf.h>
#include <linux/memory.h>
#include <linux/sort.h>
#include <linux/init.h>
#include <asm/extable.h>
#include <asm/set_memory.h>
#include <asm/nospec-branch.h>
@@ -389,18 +388,6 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
	return ret;
}

int __init bpf_arch_init_dispatcher_early(void *ip)
{
	const u8 *nop_insn = x86_nops[5];

	if (is_endbr(*(u32 *)ip))
		ip += ENDBR_INSN_SIZE;

	if (memcmp(ip, nop_insn, X86_PATCH_SIZE))
		text_poke_early(ip, nop_insn, X86_PATCH_SIZE);
	return 0;
}

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *old_addr, void *new_addr)
{
+39 −21
Original line number Diff line number Diff line
@@ -27,7 +27,7 @@
#include <linux/bpfptr.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/init.h>
#include <linux/static_call.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -315,7 +315,7 @@ static inline void __copy_map_value(struct bpf_map *map, void *dst, void *src, b
		u32 next_off = map->off_arr->field_off[i];

		memcpy(dst + curr_off, src + curr_off, next_off - curr_off);
		curr_off += map->off_arr->field_sz[i];
		curr_off = next_off + map->off_arr->field_sz[i];
	}
	memcpy(dst + curr_off, src + curr_off, map->value_size - curr_off);
}
@@ -344,7 +344,7 @@ static inline void zero_map_value(struct bpf_map *map, void *dst)
		u32 next_off = map->off_arr->field_off[i];

		memset(dst + curr_off, 0, next_off - curr_off);
		curr_off += map->off_arr->field_sz[i];
		curr_off = next_off + map->off_arr->field_sz[i];
	}
	memset(dst + curr_off, 0, map->value_size - curr_off);
}
@@ -954,6 +954,10 @@ struct bpf_dispatcher {
	void *rw_image;
	u32 image_off;
	struct bpf_ksym ksym;
#ifdef CONFIG_HAVE_STATIC_CALL
	struct static_call_key *sc_key;
	void *sc_tramp;
#endif
};

static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
@@ -971,7 +975,33 @@ struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs);
int __init bpf_arch_init_dispatcher_early(void *ip);

/*
 * When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn
 * indirection with a direct call to the bpf program. If the architecture does
 * not have STATIC_CALL, avoid a double-indirection.
 */
#ifdef CONFIG_HAVE_STATIC_CALL

#define __BPF_DISPATCHER_SC_INIT(_name)				\
	.sc_key = &STATIC_CALL_KEY(_name),			\
	.sc_tramp = STATIC_CALL_TRAMP_ADDR(_name),

#define __BPF_DISPATCHER_SC(name)				\
	DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func)

#define __BPF_DISPATCHER_CALL(name)				\
	static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func)

#define __BPF_DISPATCHER_UPDATE(_d, _new)			\
	__static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new))

#else
#define __BPF_DISPATCHER_SC_INIT(name)
#define __BPF_DISPATCHER_SC(name)
#define __BPF_DISPATCHER_CALL(name)		bpf_func(ctx, insnsi)
#define __BPF_DISPATCHER_UPDATE(_d, _new)
#endif

#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
@@ -984,34 +1014,21 @@ int __init bpf_arch_init_dispatcher_early(void *ip);
		.name  = #_name,				\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
	__BPF_DISPATCHER_SC_INIT(_name##_call)			\
}

#define BPF_DISPATCHER_INIT_CALL(_name)					\
	static int __init _name##_init(void)				\
	{								\
		return bpf_arch_init_dispatcher_early(_name##_func);	\
	}								\
	early_initcall(_name##_init)

#ifdef CONFIG_X86_64
#define BPF_DISPATCHER_ATTRIBUTES __attribute__((patchable_function_entry(5)))
#else
#define BPF_DISPATCHER_ATTRIBUTES
#endif

#define DEFINE_BPF_DISPATCHER(name)					\
	notrace BPF_DISPATCHER_ATTRIBUTES				\
	__BPF_DISPATCHER_SC(name);					\
	noinline __nocfi unsigned int bpf_dispatcher_##name##_func(	\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		bpf_func_t bpf_func)					\
	{								\
		return bpf_func(ctx, insnsi);				\
		return __BPF_DISPATCHER_CALL(name);			\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);		\
	BPF_DISPATCHER_INIT_CALL(bpf_dispatcher_##name);
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);

#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
@@ -1019,6 +1036,7 @@ int __init bpf_arch_init_dispatcher_early(void *ip);
		const struct bpf_insn *insnsi,				\
		bpf_func_t bpf_func);					\
	extern struct bpf_dispatcher bpf_dispatcher_##name;

#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
+8 −20
Original line number Diff line number Diff line
@@ -4,7 +4,7 @@
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/init.h>
#include <linux/static_call.h>

/* The BPF dispatcher is a multiway branch code generator. The
 * dispatcher is a mechanism to avoid the performance penalty of an
@@ -91,11 +91,6 @@ int __weak arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int n
	return -ENOTSUPP;
}

int __weak __init bpf_arch_init_dispatcher_early(void *ip)
{
	return -ENOTSUPP;
}

static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image, void *buf)
{
	s64 ips[BPF_DISPATCHER_MAX] = {}, *ipsp = &ips[0];
@@ -110,17 +105,11 @@ static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image, void *b

static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
{
	void *old, *new, *tmp;
	u32 noff;
	int err;

	if (!prev_num_progs) {
		old = NULL;
		noff = 0;
	} else {
		old = d->image + d->image_off;
	void *new, *tmp;
	u32 noff = 0;

	if (prev_num_progs)
		noff = d->image_off ^ (PAGE_SIZE / 2);
	}

	new = d->num_progs ? d->image + noff : NULL;
	tmp = d->num_progs ? d->rw_image + noff : NULL;
@@ -134,10 +123,9 @@ static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
			return;
	}

	err = bpf_arch_text_poke(d->func, BPF_MOD_JUMP, old, new);
	if (err || !new)
		return;
	__BPF_DISPATCHER_UPDATE(d, new ?: (void *)&bpf_dispatcher_nop_func);

	if (new)
		d->image_off = noff;
}

+11 −12
Original line number Diff line number Diff line
@@ -100,22 +100,21 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
			    u32 nr_elems)
{
	struct pcpu_freelist_head *head;
	int i, cpu, pcpu_entries;
	unsigned int cpu, cpu_idx, i, j, n, m;

	pcpu_entries = nr_elems / num_possible_cpus() + 1;
	i = 0;
	n = nr_elems / num_possible_cpus();
	m = nr_elems % num_possible_cpus();

	cpu_idx = 0;
	for_each_possible_cpu(cpu) {
again:
		head = per_cpu_ptr(s->freelist, cpu);
		j = n + (cpu_idx < m ? 1 : 0);
		for (i = 0; i < j; i++) {
			/* No locking required as this is not visible yet. */
			pcpu_freelist_push_node(head, buf);
		i++;
			buf += elem_size;
		if (i == nr_elems)
			break;
		if (i % pcpu_entries)
			goto again;
		}
		cpu_idx++;
	}
}

+9 −5
Original line number Diff line number Diff line
@@ -6745,11 +6745,11 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
	/* Transfer references to the callee */
	err = copy_reference_state(callee, caller);
	if (err)
		return err;
		goto err_out;

	err = set_callee_state_cb(env, caller, callee, *insn_idx);
	if (err)
		return err;
		goto err_out;

	clear_caller_saved_regs(env, caller->regs);

@@ -6766,6 +6766,11 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
		print_verifier_state(env, callee, true);
	}
	return 0;

err_out:
	free_func_state(callee);
	state->frame[state->curframe + 1] = NULL;
	return err;
}

int map_set_for_each_callback_args(struct bpf_verifier_env *env,
@@ -6979,8 +6984,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
		return -EINVAL;
	}

	state->curframe--;
	caller = state->frame[state->curframe];
	caller = state->frame[state->curframe - 1];
	if (callee->in_callback_fn) {
		/* enforce R0 return value range [0, 1]. */
		struct tnum range = callee->callback_ret_range;
@@ -7019,7 +7023,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
	}
	/* clear everything in the callee */
	free_func_state(callee);
	state->frame[state->curframe + 1] = NULL;
	state->frame[state->curframe--] = NULL;
	return 0;
}

Loading