Commit 6646dc24 authored by Jakub Kicinski's avatar Jakub Kicinski
Browse files
Daniel Borkmann says:

====================
pull-request: bpf-next 2022-03-04

We've added 32 non-merge commits during the last 14 day(s) which contain
a total of 59 files changed, 1038 insertions(+), 473 deletions(-).

The main changes are:

1) Optimize BPF stackmap's build_id retrieval by caching last valid build_id,
   as consecutive stack frames are likely to be in the same VMA and therefore
   have the same build id, from Hao Luo.

2) Several improvements to the arm64 BPF JIT, that is, support for JITing
   the atomic[64]_fetch_add, atomic[64]_[fetch_]{and,or,xor} and lastly
   atomic[64]_{xchg|cmpxchg}. Also fix the BTF line info dump for JITed
   programs, from Hou Tao.

3) Optimize generic BPF map batch deletion by only enforcing synchronize_rcu()
   barrier once upon return to user space, from Eric Dumazet.

4) For kernel build parse DWARF and generate BTF through pahole with enabled
   multithreading, from Kui-Feng Lee.

5) BPF verifier usability improvements by making log info more concise and
   replacing inv with scalar type name, from Mykola Lysenko.

6) Two follow-up fixes for BPF prog JIT pack allocator, from Song Liu.

7) Add a new Kconfig to allow loading kernel modules with non-matching
   BTF type info; their BTF info is then removed on load, from Connor O'Brien.

8) Remove reallocarray() usage from bpftool and switch to libbpf_reallocarray()
   in order to fix compilation errors for older glibc, from Mauricio Vásquez.

9) Fix libbpf to error on conflicting name in BTF when type declaration
   appears before the definition, from Xu Kuohai.

10) Fix issue in BPF preload for in-kernel light skeleton where loaded BPF
    program fds prevent the init process from setting up fds 0-2, from Yucong Sun.

11) Fix libbpf reuse of pinned perf RB map when max_entries is auto-determined
    by libbpf, from Stijn Tintel.

12) Several cleanups for libbpf and a fix to enforce perf RB map #pages to be
    non-zero, from Yuntao Wang.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (32 commits)
  bpf: Small BPF verifier log improvements
  libbpf: Add a check to ensure that page_cnt is non-zero
  bpf, x86: Set header->size properly before freeing it
  x86: Disable HAVE_ARCH_HUGE_VMALLOC on 32-bit x86
  bpf, test_run: Fix overflow in XDP frags bpf_test_finish
  selftests/bpf: Update btf_dump case for conflicting names
  libbpf: Skip forward declaration when counting duplicated type names
  bpf: Add some description about BPF_JIT_ALWAYS_ON in Kconfig
  bpf, docs: Add a missing colon in verifier.rst
  bpf: Cache the last valid build_id
  libbpf: Fix BPF_MAP_TYPE_PERF_EVENT_ARRAY auto-pinning
  bpf, selftests: Use raw_tp program for atomic test
  bpf, arm64: Support more atomic operations
  bpftool: Remove redundant slashes
  bpf: Add config to allow loading modules with BTF mismatches
  bpf, arm64: Feed byte-offset into bpf line info
  bpf, arm64: Call build_prologue() first in first JIT pass
  bpf: Fix issue with bpf preload module taking over stdout/stdin of kernel.
  bpftool: Bpf skeletons assert type sizes
  bpf: Cleanup comments
  ...
====================

Link: https://lore.kernel.org/r/20220304164313.31675-1-daniel@iogearbox.net


Signed-off-by: default avatarJakub Kicinski <kuba@kernel.org>
parents 1039135a 7df5072c
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -329,7 +329,7 @@ Program with unreachable instructions::
  BPF_EXIT_INSN(),
  };

Error:
Error::

  unreachable insn 1

+0 −12
Original line number Diff line number Diff line
@@ -34,18 +34,6 @@
 */
#define BREAK_INSTR_SIZE		AARCH64_INSN_SIZE

/*
 * BRK instruction encoding
 * The #imm16 value should be placed at bits[20:5] within BRK ins
 */
#define AARCH64_BREAK_MON	0xd4200000

/*
 * BRK instruction for provoking a fault on purpose
 * Unlike kgdb, #imm16 value with unallocated handler is used for faulting.
 */
#define AARCH64_BREAK_FAULT	(AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))

#define AARCH64_BREAK_KGDB_DYN_DBG	\
	(AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))

+14 −0
Original line number Diff line number Diff line
@@ -3,7 +3,21 @@
#ifndef __ASM_INSN_DEF_H
#define __ASM_INSN_DEF_H

#include <asm/brk-imm.h>

/* A64 instructions are always 32 bits. */
#define	AARCH64_INSN_SIZE		4

/*
 * BRK instruction encoding
 * The #imm16 value should be placed at bits[20:5] within BRK ins
 */
#define AARCH64_BREAK_MON	0xd4200000

/*
 * BRK instruction for provoking a fault on purpose
 * Unlike kgdb, #imm16 value with unallocated handler is used for faulting.
 */
#define AARCH64_BREAK_FAULT	(AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))

#endif /* __ASM_INSN_DEF_H */
+73 −7
Original line number Diff line number Diff line
@@ -205,7 +205,9 @@ enum aarch64_insn_ldst_type {
	AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
	AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX,
	AARCH64_INSN_LDST_LOAD_EX,
	AARCH64_INSN_LDST_LOAD_ACQ_EX,
	AARCH64_INSN_LDST_STORE_EX,
	AARCH64_INSN_LDST_STORE_REL_EX,
};

enum aarch64_insn_adsb_type {
@@ -280,6 +282,36 @@ enum aarch64_insn_adr_type {
	AARCH64_INSN_ADR_TYPE_ADR,
};

enum aarch64_insn_mem_atomic_op {
	AARCH64_INSN_MEM_ATOMIC_ADD,
	AARCH64_INSN_MEM_ATOMIC_CLR,
	AARCH64_INSN_MEM_ATOMIC_EOR,
	AARCH64_INSN_MEM_ATOMIC_SET,
	AARCH64_INSN_MEM_ATOMIC_SWP,
};

enum aarch64_insn_mem_order_type {
	AARCH64_INSN_MEM_ORDER_NONE,
	AARCH64_INSN_MEM_ORDER_ACQ,
	AARCH64_INSN_MEM_ORDER_REL,
	AARCH64_INSN_MEM_ORDER_ACQREL,
};

enum aarch64_insn_mb_type {
	AARCH64_INSN_MB_SY,
	AARCH64_INSN_MB_ST,
	AARCH64_INSN_MB_LD,
	AARCH64_INSN_MB_ISH,
	AARCH64_INSN_MB_ISHST,
	AARCH64_INSN_MB_ISHLD,
	AARCH64_INSN_MB_NSH,
	AARCH64_INSN_MB_NSHST,
	AARCH64_INSN_MB_NSHLD,
	AARCH64_INSN_MB_OSH,
	AARCH64_INSN_MB_OSHST,
	AARCH64_INSN_MB_OSHLD,
};

#define	__AARCH64_INSN_FUNCS(abbr, mask, val)				\
static __always_inline bool aarch64_insn_is_##abbr(u32 code)		\
{									\
@@ -303,6 +335,11 @@ __AARCH64_INSN_FUNCS(store_post, 0x3FE00C00, 0x38000400)
__AARCH64_INSN_FUNCS(load_post,	0x3FE00C00, 0x38400400)
__AARCH64_INSN_FUNCS(str_reg,	0x3FE0EC00, 0x38206800)
__AARCH64_INSN_FUNCS(ldadd,	0x3F20FC00, 0x38200000)
__AARCH64_INSN_FUNCS(ldclr,	0x3F20FC00, 0x38201000)
__AARCH64_INSN_FUNCS(ldeor,	0x3F20FC00, 0x38202000)
__AARCH64_INSN_FUNCS(ldset,	0x3F20FC00, 0x38203000)
__AARCH64_INSN_FUNCS(swp,	0x3F20FC00, 0x38208000)
__AARCH64_INSN_FUNCS(cas,	0x3FA07C00, 0x08A07C00)
__AARCH64_INSN_FUNCS(ldr_reg,	0x3FE0EC00, 0x38606800)
__AARCH64_INSN_FUNCS(ldr_lit,	0xBF000000, 0x18000000)
__AARCH64_INSN_FUNCS(ldrsw_lit,	0xFF000000, 0x98000000)
@@ -474,13 +511,6 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type);
u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
			   enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size);
u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
			   enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size);
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
@@ -541,6 +571,42 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy);
#ifdef CONFIG_ARM64_LSE_ATOMICS
u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
				  enum aarch64_insn_register address,
				  enum aarch64_insn_register value,
				  enum aarch64_insn_size_type size,
				  enum aarch64_insn_mem_atomic_op op,
				  enum aarch64_insn_mem_order_type order);
u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
			 enum aarch64_insn_register address,
			 enum aarch64_insn_register value,
			 enum aarch64_insn_size_type size,
			 enum aarch64_insn_mem_order_type order);
#else
static inline
u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
				  enum aarch64_insn_register address,
				  enum aarch64_insn_register value,
				  enum aarch64_insn_size_type size,
				  enum aarch64_insn_mem_atomic_op op,
				  enum aarch64_insn_mem_order_type order)
{
	return AARCH64_BREAK_FAULT;
}

static inline
u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
			 enum aarch64_insn_register address,
			 enum aarch64_insn_register value,
			 enum aarch64_insn_size_type size,
			 enum aarch64_insn_mem_order_type order)
{
	return AARCH64_BREAK_FAULT;
}
#endif
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);

s32 aarch64_get_branch_offset(u32 insn);
u32 aarch64_set_branch_offset(u32 insn, s32 offset);

+172 −15
Original line number Diff line number Diff line
@@ -578,10 +578,16 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
	case AARCH64_INSN_LDST_LOAD_ACQ_EX:
		insn = aarch64_insn_get_load_ex_value();
		if (type == AARCH64_INSN_LDST_LOAD_ACQ_EX)
			insn |= BIT(15);
		break;
	case AARCH64_INSN_LDST_STORE_EX:
	case AARCH64_INSN_LDST_STORE_REL_EX:
		insn = aarch64_insn_get_store_ex_value();
		if (type == AARCH64_INSN_LDST_STORE_REL_EX)
			insn |= BIT(15);
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
@@ -603,12 +609,65 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
					    state);
}

u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
#ifdef CONFIG_ARM64_LSE_ATOMICS
static u32 aarch64_insn_encode_ldst_order(enum aarch64_insn_mem_order_type type,
					  u32 insn)
{
	u32 order;

	switch (type) {
	case AARCH64_INSN_MEM_ORDER_NONE:
		order = 0;
		break;
	case AARCH64_INSN_MEM_ORDER_ACQ:
		order = 2;
		break;
	case AARCH64_INSN_MEM_ORDER_REL:
		order = 1;
		break;
	case AARCH64_INSN_MEM_ORDER_ACQREL:
		order = 3;
		break;
	default:
		pr_err("%s: unknown mem order %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(23, 22);
	insn |= order << 22;

	return insn;
}

u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
				  enum aarch64_insn_register address,
				  enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
				  enum aarch64_insn_size_type size,
				  enum aarch64_insn_mem_atomic_op op,
				  enum aarch64_insn_mem_order_type order)
{
	u32 insn = aarch64_insn_get_ldadd_value();
	u32 insn;

	switch (op) {
	case AARCH64_INSN_MEM_ATOMIC_ADD:
		insn = aarch64_insn_get_ldadd_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_CLR:
		insn = aarch64_insn_get_ldclr_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_EOR:
		insn = aarch64_insn_get_ldeor_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_SET:
		insn = aarch64_insn_get_ldset_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_SWP:
		insn = aarch64_insn_get_swp_value();
		break;
	default:
		pr_err("%s: unimplemented mem atomic op %d\n", __func__, op);
		return AARCH64_BREAK_FAULT;
	}

	switch (size) {
	case AARCH64_INSN_SIZE_32:
@@ -621,6 +680,8 @@ u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_ldst_order(order, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

@@ -631,17 +692,68 @@ u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
					    value);
}

u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
static u32 aarch64_insn_encode_cas_order(enum aarch64_insn_mem_order_type type,
					 u32 insn)
{
	u32 order;

	switch (type) {
	case AARCH64_INSN_MEM_ORDER_NONE:
		order = 0;
		break;
	case AARCH64_INSN_MEM_ORDER_ACQ:
		order = BIT(22);
		break;
	case AARCH64_INSN_MEM_ORDER_REL:
		order = BIT(15);
		break;
	case AARCH64_INSN_MEM_ORDER_ACQREL:
		order = BIT(15) | BIT(22);
		break;
	default:
		pr_err("%s: unknown mem order %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(BIT(15) | BIT(22));
	insn |= order;

	return insn;
}

u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
			 enum aarch64_insn_register address,
			 enum aarch64_insn_register value,
			   enum aarch64_insn_size_type size)
			 enum aarch64_insn_size_type size,
			 enum aarch64_insn_mem_order_type order)
{
	/*
	 * STADD is simply encoded as an alias for LDADD with XZR as
	 * the destination register.
	 */
	return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
				      value, size);
	u32 insn;

	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_get_cas_value();

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_cas_order(order, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}
#endif

static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
@@ -1379,7 +1491,7 @@ static u32 aarch64_encode_immediate(u64 imm,
		 * Compute the rotation to get a continuous set of
		 * ones, with the first bit set at position 0
		 */
		ror = fls(~imm);
		ror = fls64(~imm);
	}

	/*
@@ -1456,3 +1568,48 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}

u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
{
	u32 opt;
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MB_SY:
		opt = 0xf;
		break;
	case AARCH64_INSN_MB_ST:
		opt = 0xe;
		break;
	case AARCH64_INSN_MB_LD:
		opt = 0xd;
		break;
	case AARCH64_INSN_MB_ISH:
		opt = 0xb;
		break;
	case AARCH64_INSN_MB_ISHST:
		opt = 0xa;
		break;
	case AARCH64_INSN_MB_ISHLD:
		opt = 0x9;
		break;
	case AARCH64_INSN_MB_NSH:
		opt = 0x7;
		break;
	case AARCH64_INSN_MB_NSHST:
		opt = 0x6;
		break;
	case AARCH64_INSN_MB_NSHLD:
		opt = 0x5;
		break;
	default:
		pr_err("%s: unknown dmb type %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_get_dmb_value();
	insn &= ~GENMASK(11, 8);
	insn |= (opt << 8);

	return insn;
}
Loading