Commit cf71b174 authored by Maciej Fijalkowski, committed by Alexei Starovoitov
Browse files

bpf: rename poke descriptor's 'ip' member to 'tailcall_target'



Reflect the actual purpose of poke->ip and rename it to
poke->tailcall_target so that it will not be confused with another
poke target that will be introduced in the next commit.

While at it, do the same thing with poke->ip_stable - rename it to
poke->tailcall_target_stable.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent a748c697
Loading
Loading
Loading
Loading
+11 −9
Original line number Diff line number Diff line
@@ -434,7 +434,7 @@ static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
	EMIT3(0x83, 0xC0, 0x01);                      /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */

	poke->ip = image + (addr - X86_PATCH_SIZE);
	poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
	poke->adj_off = PROLOGUE_SIZE;

	memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
@@ -453,7 +453,7 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)

	for (i = 0; i < prog->aux->size_poke_tab; i++) {
		poke = &prog->aux->poke_tab[i];
		WARN_ON_ONCE(READ_ONCE(poke->ip_stable));
		WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));

		if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
			continue;
@@ -464,18 +464,20 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
		if (target) {
			/* Plain memcpy is used when image is not live yet
			 * and still not locked as read-only. Once poke
			 * location is active (poke->ip_stable), any parallel
			 * bpf_arch_text_poke() might occur still on the
			 * read-write image until we finally locked it as
			 * read-only. Both modifications on the given image
			 * are under text_mutex to avoid interference.
			 * location is active (poke->tailcall_target_stable),
			 * any parallel bpf_arch_text_poke() might occur
			 * still on the read-write image until we finally
			 * locked it as read-only. Both modifications on
			 * the given image are under text_mutex to avoid
			 * interference.
			 */
			ret = __bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP, NULL,
			ret = __bpf_arch_text_poke(poke->tailcall_target,
						   BPF_MOD_JUMP, NULL,
						   (u8 *)target->bpf_func +
						   poke->adj_off, false);
			BUG_ON(ret < 0);
		}
		WRITE_ONCE(poke->ip_stable, true);
		WRITE_ONCE(poke->tailcall_target_stable, true);
		mutex_unlock(&array->aux->poke_mutex);
	}
}
+2 −2
Original line number Diff line number Diff line
@@ -697,14 +697,14 @@ enum bpf_jit_poke_reason {

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *ip;
	void *tailcall_target;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool ip_stable;
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
+9 −8
Original line number Diff line number Diff line
@@ -918,12 +918,13 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->ip_stable is not
			 *    active yet. The JIT will do the final fixup before
			 *    setting it stable. The various poke->ip_stable are
			 *    successively activated, so tail call updates can
			 *    arrive from here while JIT is still finishing its
			 *    final fixup for non-activated poke entries.
			 *    entry. We skip these as poke->tailcall_target_stable
			 *    is not active yet. The JIT will do the final fixup
			 *    before setting it stable. The various
			 *    poke->tailcall_target_stable are successively
			 *    activated, so tail call updates can arrive from here
			 *    while JIT is still finishing its final fixup for
			 *    non-activated poke entries.
			 * 3) On program teardown, the program's kallsym entry gets
			 *    removed out of RCU callback, but we can only untrack
			 *    from sleepable context, therefore bpf_arch_text_poke()
@@ -940,7 +941,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
			 * 5) Any other error happening below from bpf_arch_text_poke()
			 *    is a unexpected bug.
			 */
			if (!READ_ONCE(poke->ip_stable))
			if (!READ_ONCE(poke->tailcall_target_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
@@ -948,7 +949,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
			    poke->tail_call.key != key)
				continue;

			ret = bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP,
			ret = bpf_arch_text_poke(poke->tailcall_target, BPF_MOD_JUMP,
						 old ? (u8 *)old->bpf_func +
						 poke->adj_off : NULL,
						 new ? (u8 *)new->bpf_func +
+2 −1
Original line number Diff line number Diff line
@@ -775,7 +775,8 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,

	if (size > poke_tab_max)
		return -ENOSPC;
	if (poke->ip || poke->ip_stable || poke->adj_off)
	if (poke->tailcall_target || poke->tailcall_target_stable ||
	    poke->adj_off)
		return -EINVAL;

	switch (poke->reason) {