Commit 30b8fdbb authored by Alexei Starovoitov

Merge branch 'bpf: Fixes for CONFIG_X86_KERNEL_IBT'



Jiri Olsa says:

====================
Martynas reported bpf_get_func_ip returning an address 4 bytes past
the function entry when CONFIG_X86_KERNEL_IBT is enabled, and I found
some bpf tests failing with this option enabled.

The CONFIG_X86_KERNEL_IBT option adds an endbr instruction at the
function entry, so the idea is to 'fix' the entry ip for kprobe_multi
and trampoline probes, because they are placed on the function
entry.
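
As a rough illustration (my sketch, assuming a 64-bit kernel where the
IBT landing pad is endbr64), the reported ip ends up 4 bytes past the
symbol address because the probes attach at the __fentry__ patch site:

  /*
   * <traced_func>:      endbr64          // 4-byte IBT landing pad
   * <traced_func> + 4:  call __fentry__  // fprobe/trampoline probes
   *                                      // land here, i.e. at
   *                                      // <traced_func> + ENDBR_INSN_SIZE
   */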

v5 changes:
  - updated uapi/linux/bpf.h headers with comment for
    bpf_get_func_ip returning 0 [Andrii]
  - added acks

v4 changes:
  - used get_kernel_nofault to read previous instruction [Peter]
  - used movabs instruction in trampoline comment [Peter]
  - renamed fentry_ip argument in kprobe_multi_link_handler [Peter]

v3 changes:
  - using 'unused' bpf function to get IBT config option
    into selftest skeleton
  - rebased to current bpf-next/master
  - added ack/review from Masami

v2 changes:
  - change kprobes get_func_ip to return zero for kprobes
    attached within the function body [Andrii]
  - detect IBT config and properly test kprobe with offset
    [Andrii]

v1 changes:
  - read previous instruction in kprobe_multi link handler
    and adjust entry_ip for CONFIG_X86_KERNEL_IBT option
  - split first patch into 2 separate changes
  - update changelogs
====================

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents bec21719 738c345b
arch/x86/net/bpf_jit_comp.c +5 −6
@@ -662,7 +662,7 @@ static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
		 */
		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
	} else {
-		/* movabsq %rax, imm64 */
+		/* movabsq rax, imm64 */
		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
		EMIT(imm32_lo, 4);
		EMIT(imm32_hi, 4);
@@ -2039,13 +2039,14 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_links *tlinks,
-				void *orig_call)
+				void *func_addr)
{
	int ret, i, nr_args = m->nr_args, extra_nregs = 0;
	int regs_off, ip_off, args_off, stack_size = nr_args * 8, run_ctx_off;
	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+	void *orig_call = func_addr;
	u8 **branches = NULL;
	u8 *prog;
	bool save_ret;
@@ -2126,12 +2127,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i

	if (flags & BPF_TRAMP_F_IP_ARG) {
		/* Store IP address of the traced function:
-		 * mov rax, QWORD PTR [rbp + 8]
-		 * sub rax, X86_PATCH_SIZE
+		 * movabsq rax, func_addr
		 * mov QWORD PTR [rbp - ip_off], rax
		 */
-		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
-		EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE);
+		emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
	}

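A hedged sketch of the bytes the new IP_ARG path emits (assuming
dst_reg maps to rax and ip_off fits in a signed byte, so the store
takes the disp8 form):

	/*
	 * emit_mov_imm64() emits REX.W + B8+rd + imm64:
	 *   48 b8 <func_addr, 8 bytes LE>   movabs rax, func_addr
	 * emit_stx(BPF_DW, rbp, rax, -ip_off) then stores it:
	 *   48 89 45 <-ip_off>              mov QWORD PTR [rbp - ip_off], rax
	 */

The design point: func_addr is known when the trampoline is prepared,
so embedding it as an immediate is correct whether or not the function
starts with endbr, while the old load of [rbp + 8] minus X86_PATCH_SIZE
assumed the patch site sits exactly X86_PATCH_SIZE bytes into the
function.
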
include/linux/kprobes.h +1 −0
@@ -103,6 +103,7 @@ struct kprobe {
				   * this flag is only for optimized_kprobe.
				   */
#define KPROBE_FLAG_FTRACE	8 /* probe is using ftrace */
+#define KPROBE_FLAG_ON_FUNC_ENTRY	16 /* probe is on the function entry */

/* Has this kprobe gone ? */
static inline bool kprobe_gone(struct kprobe *p)
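
The new flag extends the existing power-of-two flag space (2, 4, 8). A
minimal sketch of testing it, where kprobe_on_func_entry_flag() is a
hypothetical helper in the style of the kprobe_gone() accessor above:

static inline bool kprobe_on_func_entry_flag(struct kprobe *p)
{
	return !!(p->flags & KPROBE_FLAG_ON_FUNC_ENTRY);
}
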
include/uapi/linux/bpf.h +1 −0
@@ -4951,6 +4951,7 @@ union bpf_attr {
 * 		Get address of the traced function (for tracing and kprobe programs).
 * 	Return
 * 		Address of the traced function.
+ * 		0 for kprobes placed within the function (not at the entry).
 *
 * u64 bpf_get_attach_cookie(void *ctx)
 * 	Description
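
A hedged sketch of what the documented return value means on the
program side (the attach point do_sys_open and all names are
illustrative assumptions, not part of this change):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_open")
int BPF_KPROBE(probe_entry)
{
	u64 ip = bpf_get_func_ip(ctx);

	if (!ip)	/* kprobe placed inside the function body */
		return 0;
	bpf_printk("traced function entry: 0x%llx", ip);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
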
kernel/kprobes.c +5 −1
@@ -1606,9 +1606,10 @@ int register_kprobe(struct kprobe *p)
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;
+	bool on_func_entry;

	/* Adjust probe address from symbol */
-	addr = kprobe_addr(p);
+	addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	p->addr = addr;
@@ -1628,6 +1629,9 @@ int register_kprobe(struct kprobe *p)

	mutex_lock(&kprobe_mutex);

+	if (on_func_entry)
+		p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
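
From a kprobes client's point of view, the effect can be sketched with
a minimal module (symbol and offset are illustrative assumptions; the
offset must land on an instruction boundary):

#include <linux/kprobes.h>
#include <linux/module.h>

static struct kprobe kp = {
	.symbol_name	= "do_sys_open",
	.offset		= 0x10,	/* inside the body, not the entry */
};

static int __init kp_init(void)
{
	int ret = register_kprobe(&kp);

	if (ret)
		return ret;
	/* With a non-zero offset, KPROBE_FLAG_ON_FUNC_ENTRY stays clear,
	 * so bpf_get_func_ip() on this probe returns 0. */
	pr_info("on_func_entry: %d\n",
		!!(kp.flags & KPROBE_FLAG_ON_FUNC_ENTRY));
	return 0;
}

static void __exit kp_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_init);
module_exit(kp_exit);
MODULE_LICENSE("GPL");
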
kernel/trace/bpf_trace.c +22 −3
@@ -1028,11 +1028,30 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
	.arg1_type	= ARG_PTR_TO_CTX,
};

+#ifdef CONFIG_X86_KERNEL_IBT
+static unsigned long get_entry_ip(unsigned long fentry_ip)
+{
+	u32 instr;
+
+	/* Being extra safe in here in case entry ip is on the page-edge. */
+	if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1))
+		return fentry_ip;
+	if (is_endbr(instr))
+		fentry_ip -= ENDBR_INSN_SIZE;
+	return fentry_ip;
+}
+#else
+#define get_entry_ip(fentry_ip) fentry_ip
+#endif

BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
{
	struct kprobe *kp = kprobe_running();

-	return kp ? (uintptr_t)kp->addr : 0;
+	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
+		return 0;
+
+	return get_entry_ip((uintptr_t)kp->addr);
}

static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
@@ -2600,13 +2619,13 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
}

static void
-kprobe_multi_link_handler(struct fprobe *fp, unsigned long entry_ip,
+kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
			  struct pt_regs *regs)
{
	struct bpf_kprobe_multi_link *link;

	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
-	kprobe_multi_link_prog_run(link, entry_ip, regs);
+	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
}

static int symbols_cmp_r(const void *a, const void *b, const void *priv)
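
For the kprobe_multi path, a hedged sketch of a program that now sees
the symbol address rather than symbol + 4 under IBT (the attach pattern
and names are assumptions for illustration):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("kprobe.multi/do_sys_open*")
int multi_entry(struct pt_regs *ctx)
{
	/* With the fix above, fentry_ip is walked back past the endbr,
	 * so this matches the attached function's ksym address. */
	u64 ip = bpf_get_func_ip(ctx);

	bpf_printk("entry ip: 0x%llx", ip);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";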