Commit 46291067 authored by Brendan Jackman's avatar Brendan Jackman Committed by Alexei Starovoitov
Browse files

bpf: Pull out a macro for interpreting atomic ALU operations



Since the atomic operations that are added in subsequent commits are
all isomorphic with BPF_ADD, pull out a macro to avoid the
interpreter becoming dominated by lines of atomic-related code.

Note that this sacrifices interpreter performance (combining
STX_ATOMIC_W and STX_ATOMIC_DW into a single switch case means that we
need an extra conditional branch to differentiate them) in favour of
compact and (relatively!) simple C code.

Signed-off-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20210114181751.768687-9-jackmanb@google.com
parent 5ffa2550
Loading
Loading
Loading
Loading
+39 −41
Original line number Diff line number Diff line
@@ -1618,55 +1618,53 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
	LDX_PROBE(DW, 8)
#undef LDX_PROBE

/*
 * Expand to two switch cases for one atomic ALU opcode:
 *  - BOP:             in-place atomic op on *(DST + insn->off), no result
 *  - BOP | BPF_FETCH: same op, but the old value is returned in SRC
 * Each case dispatches on BPF_SIZE(insn->code): BPF_W uses the 32-bit
 * atomic_t helpers, otherwise the 64-bit atomic64_t helpers.
 * BOP is the BPF opcode (e.g. BPF_ADD), KOP the kernel atomic op suffix
 * (e.g. add, giving atomic_add()/atomic64_fetch_add() etc.).
 */
#define ATOMIC_ALU_OP(BOP, KOP)						\
		case BOP:						\
			if (BPF_SIZE(insn->code) == BPF_W)		\
				atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
					     (DST + insn->off));	\
			else						\
				atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
					       (DST + insn->off));	\
			break;						\
		case BOP | BPF_FETCH:					\
			if (BPF_SIZE(insn->code) == BPF_W)		\
				SRC = (u32) atomic_fetch_##KOP(		\
					(u32) SRC,			\
					(atomic_t *)(unsigned long) (DST + insn->off)); \
			else						\
				SRC = (u64) atomic64_fetch_##KOP(	\
					(u64) SRC,			\
					(atomic64_t *)(unsigned long) (DST + insn->off)); \
			break;

	STX_ATOMIC_DW:
	STX_ATOMIC_W:
		switch (IMM) {
		case BPF_ADD:
			/* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
			atomic_add((u32) SRC, (atomic_t *)(unsigned long)
				   (DST + insn->off));
			break;
		case BPF_ADD | BPF_FETCH:
			SRC = (u32) atomic_fetch_add(
				(u32) SRC,
				(atomic_t *)(unsigned long) (DST + insn->off));
			break;
		ATOMIC_ALU_OP(BPF_ADD, add)
#undef ATOMIC_ALU_OP

		case BPF_XCHG:
			if (BPF_SIZE(insn->code) == BPF_W)
				SRC = (u32) atomic_xchg(
					(atomic_t *)(unsigned long) (DST + insn->off),
					(u32) SRC);
			break;
		case BPF_CMPXCHG:
			BPF_R0 = (u32) atomic_cmpxchg(
				(atomic_t *)(unsigned long) (DST + insn->off),
				(u32) BPF_R0, (u32) SRC);
			break;
		default:
			goto default_label;
		}
		CONT;

	STX_ATOMIC_DW:
		switch (IMM) {
		case BPF_ADD:
			/* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
			atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
				     (DST + insn->off));
			break;
		case BPF_ADD | BPF_FETCH:
			SRC = (u64) atomic64_fetch_add(
				(u64) SRC,
				(atomic64_t *)(unsigned long) (DST + insn->off));
			break;
		case BPF_XCHG:
			else
				SRC = (u64) atomic64_xchg(
					(atomic64_t *)(unsigned long) (DST + insn->off),
					(u64) SRC);
			break;
		case BPF_CMPXCHG:
			if (BPF_SIZE(insn->code) == BPF_W)
				BPF_R0 = (u32) atomic_cmpxchg(
					(atomic_t *)(unsigned long) (DST + insn->off),
					(u32) BPF_R0, (u32) SRC);
			else
				BPF_R0 = (u64) atomic64_cmpxchg(
					(atomic64_t *)(unsigned long) (DST + insn->off),
					(u64) BPF_R0, (u64) SRC);
			break;

		default:
			goto default_label;
		}