Commit 7df482e6 authored by Alexei Starovoitov's avatar Alexei Starovoitov
Browse files

Merge branch 'kprobes: rethook: x86: Replace kretprobe trampoline with rethook'

Masami Hiramatsu says:

====================
Here is the 3rd version of the generic kretprobe and kretprobe-on-x86 series
for replacing the kretprobe trampoline with rethook. The previous version
is here[1]

[1] https://lore.kernel.org/all/164821817332.2373735.12048266953420821089.stgit@devnote2/T/#u

This version fixes a typo and build issues for bpf-next and a CONFIG_RETHOOK=y
error. I also added temporary mitigation lines for the ANNOTATE_NOENDBR macro
issue in the bpf-next tree [2/4].

This will be removed after merging kernel IBT series.

Background:

This rethook came from Jiri's request for multiple-kprobe support for bpf[2].
He tried to solve an issue where starting bpf with multiple kprobes takes
a long time, because bpf-kprobe must wait for an RCU grace period to
sync rcu events.

Jiri wanted to attach a single bpf handler to multiple kprobes and
he tried to introduce multiple-probe interface to kprobe. So I asked
him to use ftrace and kretprobe-like hook if it is only for the
function entry and exit, instead of adding ad-hoc interface
to kprobes.
For this purpose, I introduced the fprobe (kprobe like interface for
ftrace) with the rethook (this is a generic return hook feature for
fprobe exit handler)[3].

[2] https://lore.kernel.org/all/20220104080943.113249-1-jolsa@kernel.org/T/#u
[3] https://lore.kernel.org/all/164191321766.806991.7930388561276940676.stgit@devnote2/T/#u



The rethook is basically the same as the kretprobe trampoline. I just
decoupled it from kprobes. Eventually, all the arch-dependent kretprobe
trampolines will be replaced with the rethook trampoline instead of being
cloned, and those arches will set HAVE_RETHOOK=y.
Once I have ported the rethook to every arch that supports kretprobe, the
legacy kretprobe-specific code (which is for CONFIG_KRETPROBE_ON_RETHOOK=n)
will be removed.
====================

Acked-by: default avatarPeter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
parents ef8a257b 45c23bf4
Loading
Loading
Loading
Loading
+7 −1
Original line number Diff line number Diff line
@@ -164,7 +164,13 @@ config ARCH_USE_BUILTIN_BSWAP

config KRETPROBES
	def_bool y
	depends on KPROBES && HAVE_KRETPROBES
	depends on KPROBES && (HAVE_KRETPROBES || HAVE_RETHOOK)

config KRETPROBE_ON_RETHOOK
	def_bool y
	depends on HAVE_RETHOOK
	depends on KRETPROBES
	select RETHOOK

config USER_RETURN_NOTIFIER
	bool
+1 −0
Original line number Diff line number Diff line
@@ -224,6 +224,7 @@ config X86
	select HAVE_KPROBES_ON_FTRACE
	select HAVE_FUNCTION_ERROR_INJECTION
	select HAVE_KRETPROBES
	select HAVE_RETHOOK
	select HAVE_KVM
	select HAVE_LIVEPATCH			if X86_64
	select HAVE_MIXED_BREAKPOINTS_REGS
+11 −12
Original line number Diff line number Diff line
@@ -4,7 +4,7 @@

#include <linux/sched.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/rethook.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>

@@ -16,7 +16,7 @@ struct unwind_state {
	unsigned long stack_mask;
	struct task_struct *task;
	int graph_idx;
#ifdef CONFIG_KRETPROBES
#if defined(CONFIG_RETHOOK)
	struct llist_node *kr_cur;
#endif
	bool error;
@@ -104,19 +104,18 @@ void unwind_module_init(struct module *mod, void *orc_ip, size_t orc_ip_size,
#endif

static inline
unsigned long unwind_recover_kretprobe(struct unwind_state *state,
unsigned long unwind_recover_rethook(struct unwind_state *state,
				     unsigned long addr, unsigned long *addr_p)
{
#ifdef CONFIG_KRETPROBES
	return is_kretprobe_trampoline(addr) ?
		kretprobe_find_ret_addr(state->task, addr_p, &state->kr_cur) :
		addr;
#else
	return addr;
#ifdef CONFIG_RETHOOK
	if (is_rethook_trampoline(addr))
		return rethook_find_ret_addr(state->task, (unsigned long)addr_p,
					     &state->kr_cur);
#endif
	return addr;
}

/* Recover the return address modified by kretprobe and ftrace_graph. */
/* Recover the return address modified by rethook and ftrace_graph. */
static inline
unsigned long unwind_recover_ret_addr(struct unwind_state *state,
				     unsigned long addr, unsigned long *addr_p)
@@ -125,7 +124,7 @@ unsigned long unwind_recover_ret_addr(struct unwind_state *state,

	ret = ftrace_graph_ret_addr(state->task, &state->graph_idx,
				    addr, addr_p);
	return unwind_recover_kretprobe(state, ret, addr_p);
	return unwind_recover_rethook(state, ret, addr_p);
}

/*
+1 −0
Original line number Diff line number Diff line
@@ -103,6 +103,7 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
obj-$(CONFIG_X86_TSC)		+= trace_clock.o
obj-$(CONFIG_TRACING)		+= trace.o
obj-$(CONFIG_RETHOOK)		+= rethook.o
obj-$(CONFIG_CRASH_CORE)	+= crash_core_$(BITS).o
obj-$(CONFIG_KEXEC_CORE)	+= machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC_CORE)	+= relocate_kernel_$(BITS).o crash.o
+1 −0
Original line number Diff line number Diff line
@@ -6,6 +6,7 @@

#include <asm/asm.h>
#include <asm/frame.h>
#include <asm/insn.h>

#ifdef CONFIG_X86_64

Loading