Commit ecdf985d authored by Eduard Zingerman, committed by Alexei Starovoitov

bpf: track immediate values written to stack by BPF_ST instruction



For aligned stack writes using the BPF_ST instruction, track stored values
in the same way BPF_STX is handled, i.e. make sure that the following
commands produce equivalent verifier knowledge:

  fp[-8] = 42;             r1 = 42;
                           fp[-8] = r1;

This covers two cases:
 - non-null values written to the stack are stored as spills of fake
   registers;
 - null values written to the stack are stored as STACK_ZERO marks.

Previously both cases above used STACK_MISC marks instead.
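
For example (a sketch using the selftests' instruction macros, with
offsets and values chosen for illustration), after this change:

  BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 42),  /* fp[-8]: spill of a fake
                                              register with known value 42 */
  BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),  /* fp[-16]: STACK_ZERO marks */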

Some verifier test cases relied on the old logic to obtain STACK_MISC
marks for some stack values. These test cases are updated in the same
commit to avoid failures during bisect.
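
The update pattern, visible throughout the test diff below, replaces a
constant store with a spill of the unpredictable bpf_ktime_get_ns()
result, so the loaded value stays unbounded:

  old:  BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
  new:  BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
        BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),

Since the two added instructions precede BPF_LD_MAP_FD, the map fixup
indices shift as well (e.g. .fixup_map_hash_8b = { 3 } becomes { 5 }).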

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230214232030.1502829-2-eddyz87@gmail.com


Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 62d101d5
kernel/bpf/verifier.c  +16 −2
@@ -3473,6 +3473,11 @@ static void save_register_state(struct bpf_func_state *state,
		scrub_spilled_slot(&state->stack[spi].slot_type[i - 1]);
}

+static bool is_bpf_st_mem(struct bpf_insn *insn)
+{
+	return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM;
+}
+
/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
 * stack boundary and alignment are checked in check_mem_access()
 */
@@ -3484,8 +3489,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
{
	struct bpf_func_state *cur; /* state of the current function */
	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
-	u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
+	struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
	struct bpf_reg_state *reg = NULL;
+	u32 dst_reg = insn->dst_reg;
	err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
	if (err)
@@ -3538,6 +3544,13 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
				return err;
		}
		save_register_state(state, spi, reg, size);
+	} else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
+		   insn->imm != 0 && env->bpf_capable) {
+		struct bpf_reg_state fake_reg = {};
+
+		__mark_reg_known(&fake_reg, (u32)insn->imm);
+		fake_reg.type = SCALAR_VALUE;
+		save_register_state(state, spi, &fake_reg, size);
	} else if (reg && is_spillable_regtype(reg->type)) {
		/* register containing pointer is being spilled into stack */
		if (size != BPF_REG_SIZE) {
@@ -3572,7 +3585,8 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
		/* when we zero initialize stack slots mark them as such */
-		if (reg && register_is_null(reg)) {
+		if ((reg && register_is_null(reg)) ||
+		    (!reg && is_bpf_st_mem(insn) && insn->imm == 0)) {
			/* backtracking doesn't work for STACK_ZERO yet. */
			err = mark_chain_precision(env, value_regno);
			if (err)
tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c  +64 −46
@@ -1,13 +1,14 @@
{
	"bounds checks mixing signed and unsigned, positive bounds",
	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, 2),
	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
@@ -17,20 +18,21 @@
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned",
	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
@@ -40,20 +42,21 @@
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 2",
	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
@@ -65,20 +68,21 @@
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 3",
	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
@@ -89,20 +93,21 @@
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 4",
	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, 1),
	BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
@@ -112,19 +117,20 @@
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
	.result = ACCEPT,
},
{
	"bounds checks mixing signed and unsigned, variant 5",
	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
@@ -135,17 +141,20 @@
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 6",
	.insns = {
+	BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_6, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
@@ -163,13 +172,14 @@
{
	"bounds checks mixing signed and unsigned, variant 7",
	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
@@ -179,19 +189,20 @@
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
	.result = ACCEPT,
},
{
	"bounds checks mixing signed and unsigned, variant 8",
	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -1),
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
@@ -203,20 +214,21 @@
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 9",
	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
@@ -228,19 +240,20 @@
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
	.result = ACCEPT,
},
{
	"bounds checks mixing signed and unsigned, variant 10",
	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
@@ -252,20 +265,21 @@
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 11",
	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -1),
	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
@@ -278,20 +292,21 @@
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 12",
	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -6),
	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
@@ -303,20 +318,21 @@
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 13",
	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, 2),
	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
@@ -331,7 +347,7 @@
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
@@ -340,13 +356,14 @@
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -1),
	BPF_MOV64_IMM(BPF_REG_8, 2),
@@ -360,20 +377,21 @@
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
	BPF_JMP_IMM(BPF_JA, 0, 0, -7),
	},
-	.fixup_map_hash_8b = { 4 },
+	.fixup_map_hash_8b = { 6 },
	.errstr = "unbounded min value",
	.result = REJECT,
},
{
	"bounds checks mixing signed and unsigned, variant 15",
	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
-	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
	BPF_MOV64_IMM(BPF_REG_2, -6),
	BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
@@ -387,7 +405,7 @@
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
-	.fixup_map_hash_8b = { 3 },
+	.fixup_map_hash_8b = { 5 },
	.errstr = "unbounded min value",
	.result = REJECT,
},