Unverified Commit a4e60eb1 authored by openeuler-ci-bot, committed by Gitee

!7744 v3 Port livepatch related patches

Merge Pull Request from: @ci-robot 
 
PR sync from: Zheng Yejian <zhengyejian1@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/ARGHXSJCZ3NYIZYZC5E3BV3W6Z5VDEUI/ 
Link: https://gitee.com/openeuler/kernel/issues/I9R2TB

v2->v3:
  - Explicitly disable CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE to
    fix the checkdefconfig failure.
    Link: https://openeulerjenkins.osinfra.cn/job/multiarch/job/openeuler/job/x86-64/job/kernel/12707/console

v1->v2:
  - Fix a format warning and a bug in check_task_calltrace() on ppc32 & ppc64

Masami Hiramatsu (3):
  x86/unwind: Recover kretprobe trampoline entry
  x86/unwind: Compile kretprobe fixup code only if CONFIG_KRETPROBES=y
  arm64: Recover kretprobe modified return address in stacktrace

Zheng Yejian (30):
  livepatch: Move 'struct klp_func_list' out of arch
  livepatch/x86: Move 'struct klp_func_list' related codes out of arch
  livepatch/arm: Remove duplicate 'struct klp_func_list' related codes
  livepatch/arm64: Remove duplicate 'struct klp_func_list' related codes
  livepatch/ppc32: Remove duplicate 'struct klp_func_list' related codes
  livepatch/ppc64: Remove duplicate 'struct klp_func_list' related codes
  livepatch/x86: Implement arch_klp_check_task_calltrace()
  livepatch/arm: Implement arch_klp_check_task_calltrace()
  livepatch/arm64: Implement arch_klp_check_task_calltrace()
  livepatch/ppc32: Implement arch_klp_check_task_calltrace()
  livepatch/ppc64: Implement arch_klp_check_task_calltrace()
  livepatch/x86: Adjust instruction replace order for KLP_STACK_OPTIMIZE
  livepatch/arm: Adjust instruction replace order for KLP_STACK_OPTIMIZE
  livepatch/arm64: Adjust instruction replace order for
    KLP_STACK_OPTIMIZE
  livepatch/ppc32: Adjust instruction replace order for
    KLP_STACK_OPTIMIZE
  livepatch/ppc64: Adjust instruction replace order for
    KLP_STACK_OPTIMIZE
  livepatch: Complete check calltrace for running tasks
  livepatch: Check calltrace of idle tasks
  livepatch: Organize active functions with struct 'list_head'
  livepatch: Fix huge_depth in arch_klp_check_activeness_func()
  livepatch: Use func->func_node directly
  livepatch/core: Make several functions to be static
  livepatch: Fix warning C_RULE_ID_SINGLE_BRANCH_IF_AND_LOOP_BRACKET
  livepatch: Reduce duplicate definition of 'struct
    walk_stackframe_args'
  ftrace: Fix possible use-after-free issue in ftrace_location()
  kprobes: Fix possible use-after-free issue on kprobe registration
  livepatch: Avoid patching conflicts with kprobes
  kprobes: Add kretprobe_find_ret_addr() for searching return address
  livepatch: Update related configs in openeuler_defconfig
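
The net effect on the arch interface, as visible in the diffs below, is that
the old all-in-one entry point gives way to narrower arch hooks driven by the
common livepatch core. A sketch of the resulting declarations (the common
header carrying them is presumably include/linux/livepatch.h, which is not
part of this excerpt):

	/* Old per-arch entry point, removed from the arch headers: */
	int klp_check_calltrace(struct klp_patch *patch, int enable);

	/* New, narrower arch hooks, implemented per arch below: */
	bool arch_check_jump_insn(unsigned long func_addr);
	int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long),
				     void *data);
	#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE
	int arch_klp_check_task_calltrace(struct task_struct *t,
					  bool (*check_func)(void *, int *, unsigned long),
					  void *data);
	#endif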


-- 
2.25.1
 
https://gitee.com/openeuler/kernel/issues/I9R2TB 
 
Link: https://gitee.com/openeuler/kernel/pulls/7744

 

Reviewed-by: Xu Kuohai <xukuohai@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
parents f98d3882 32108ede
arch/arm/include/asm/livepatch.h: +0 −6
@@ -34,11 +34,6 @@ struct klp_func;
int arch_klp_patch_func(struct klp_func *func);
void arch_klp_unpatch_func(struct klp_func *func);

#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
int klp_check_calltrace(struct klp_patch *patch, int enable);
#endif


#if defined(CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY)

#ifdef CONFIG_ARM_MODULE_PLTS
@@ -63,7 +58,6 @@ int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func);
void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func);
long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func);
int arch_klp_module_check_calltrace(void *data);

#endif

#endif /* _ASM_ARM_LIVEPATCH_H */
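
The stack-check plumbing removed from each arch livepatch.c below was
duplicated nearly verbatim across arm, arm64, ppc32 and ppc64; per the series,
it moves into common livepatch code (the common-side diff is not part of this
excerpt). For orientation, the duplicated core looked like this (note that a
later patch, "livepatch: Organize active functions with struct 'list_head'",
reworks the hand-rolled ->next chain):

	struct klp_func_list {
		struct klp_func_list *next;
		unsigned long func_addr;
		unsigned long func_size;
		const char *func_name;
		int force;
	};

	/*
	 * For KLP_STACK_OPTIMIZE functions only the first instructions
	 * (the area actually overwritten by the long jump) must be absent
	 * from every stack, so the checked size is capped.
	 */
	static inline unsigned long klp_size_to_check(unsigned long func_size,
			int force)
	{
		unsigned long size = func_size;

		if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK)
			size = MAX_SIZE_TO_CHECK;
		return size;
	}
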
arch/arm/kernel/livepatch.c: +83 −251
@@ -39,7 +39,6 @@
#define ARM_INSN_SIZE	4
#endif

#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * ARM_INSN_SIZE)
#define CHECK_JUMP_RANGE LJMP_INSN_SIZE

#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
@@ -64,239 +63,37 @@ static bool is_jump_insn(u32 insn)
	return false;
}

struct klp_func_list {
	struct klp_func_list *next;
	unsigned long func_addr;
	unsigned long func_size;
	const char *func_name;
	int force;
};

struct walk_stackframe_args {
	int enable;
	struct klp_func_list *check_funcs;
	struct module *mod;
	int ret;
};

static inline unsigned long klp_size_to_check(unsigned long func_size,
		int force)
{
	unsigned long size = func_size;

	if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK)
		size = MAX_SIZE_TO_CHECK;
	return size;
}

static bool check_jump_insn(unsigned long func_addr)
bool arch_check_jump_insn(unsigned long func_addr)
{
	unsigned long i;
	u32 *insn = (u32*)func_addr;

	for (i = 0; i < CHECK_JUMP_RANGE; i++) {
		if (is_jump_insn(*insn)) {
		if (is_jump_insn(*insn))
			return true;
		}
		insn++;
	}
	return false;
}

static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func,
		unsigned long func_addr, unsigned long func_size, const char *func_name,
		int force)
{
	if (*func == NULL) {
		*funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC);
		if (!(*funcs))
			return -ENOMEM;
		*func = *funcs;
	} else {
		(*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs),
				GFP_ATOMIC);
		if (!(*func)->next)
			return -ENOMEM;
		*func = (*func)->next;
	}
	(*func)->func_addr = func_addr;
	(*func)->func_size = func_size;
	(*func)->func_name = func_name;
	(*func)->force = force;
	(*func)->next = NULL;
	return 0;
}

static int klp_check_activeness_func(struct klp_patch *patch, int enable,
		struct klp_func_list **check_funcs)
static int klp_check_jump_func(struct stackframe *frame, void *ws_args)
{
	int ret;
	struct klp_object *obj;
	struct klp_func_node *func_node;
	struct klp_func *func;
	unsigned long func_addr = 0;
	unsigned long func_size;
	struct klp_func_list *pcheck = NULL;

	for (obj = patch->objs; obj->funcs; obj++) {
		for (func = obj->funcs; func->old_name; func++) {
			unsigned long old_func = (unsigned long)func->old_func;
	struct walk_stackframe_args *args = ws_args;

			if (enable) {
				bool need_check_old = false;

				if (func->patched || func->force == KLP_ENFORCEMENT)
					continue;
				/*
				 * When enable, checking the currently
				 * active functions.
				 */
				func_node = klp_find_func_node(func->old_func);
				if (!func_node ||
				    list_empty(&func_node->func_stack)) {
					/*
					 * No patched on this function
					 * [ the origin one ]
					 */
					func_addr = old_func;
					func_size = func->old_size;
				} else {
					/*
					 * Previously patched function
					 * [ the active one ]
					 */
					struct klp_func *prev;

					prev = list_first_or_null_rcu(
						&func_node->func_stack,
						struct klp_func, stack_node);
					func_addr = (unsigned long)prev->new_func;
					func_size = prev->new_size;
	return !args->check_func(args->data, &args->ret, frame->pc);
}
				/*
				 * When preemption is disabled and the
				 * replacement area does not contain a jump
				 * instruction, the migration thread is
				 * scheduled to run stop machine only after the
				 * execution of instructions to be replaced is
				 * complete.
				 */
				if (IS_ENABLED(CONFIG_PREEMPTION) ||
				    (func->force == KLP_NORMAL_FORCE) ||
				    check_jump_insn(func_addr)) {
					ret = add_func_to_list(check_funcs, &pcheck,
							func_addr, func_size,
							func->old_name, func->force);
					if (ret)
						return ret;
					need_check_old = (func_addr != old_func);
				}
				if (need_check_old) {
					ret = add_func_to_list(check_funcs, &pcheck, old_func,
						func->old_size, func->old_name, func->force);
					if (ret)
						return ret;
				}
			} else {
				/*
				 * When disable, check for the previously
				 * patched function and the function itself
				 * which to be unpatched.
				 */
				func_node = klp_find_func_node(func->old_func);
				if (!func_node)
					return -EINVAL;
#ifdef CONFIG_PREEMPTION
				/*
				 * No scheduling point in the replacement
				 * instructions. Therefore, when preemption is
				 * not enabled, atomic execution is performed
				 * and these instructions will not appear on
				 * the stack.
				 */
				if (list_is_singular(&func_node->func_stack)) {
					func_addr = old_func;
					func_size = func->old_size;
				} else {
					struct klp_func *prev;

					prev = list_first_or_null_rcu(
						&func_node->func_stack,
						struct klp_func, stack_node);
					func_addr = (unsigned long)prev->new_func;
					func_size = prev->new_size;
				}
				ret = add_func_to_list(check_funcs, &pcheck,
						func_addr, func_size,
						func->old_name, 0);
				if (ret)
					return ret;
				if (func_addr != old_func) {
					ret = add_func_to_list(check_funcs, &pcheck, old_func,
						func->old_size, func->old_name, 0);
					if (ret)
						return ret;
				}
#endif
				func_addr = (unsigned long)func->new_func;
				func_size = func->new_size;
				ret = add_func_to_list(check_funcs, &pcheck,
						func_addr, func_size,
						func->old_name, 0);
				if (ret)
					return ret;
			}
		}
	}
	return 0;
}

static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc)
{
	while (funcs != NULL) {
		*ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name,
				klp_size_to_check(funcs->func_size, funcs->force));
		if (*ret) {
			return true;
		}
		funcs = funcs->next;
	}
	return false;
}

static int klp_check_jump_func(struct stackframe *frame, void *data)
{
	struct walk_stackframe_args *args = data;
	struct klp_func_list *check_funcs = args->check_funcs;

	return check_func_list(check_funcs, &args->ret, frame->pc);
}

static void free_list(struct klp_func_list **funcs)
{
	struct klp_func_list *p;

	while (*funcs != NULL) {
		p = *funcs;
		*funcs = (*funcs)->next;
		kfree(p);
	}
}

static int do_check_calltrace(struct walk_stackframe_args *args,
static int check_task_calltrace(struct task_struct *t,
				struct walk_stackframe_args *args,
				int (*fn)(struct stackframe *, void *))
{
	struct task_struct *g, *t;
	struct stackframe frame;

	for_each_process_thread(g, t) {
	if (t == current) {
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_stack_pointer;
		frame.lr = (unsigned long)__builtin_return_address(0);
			frame.pc = (unsigned long)do_check_calltrace;
		} else if (klp_is_migration_thread(t->comm)) {
			continue;
		frame.pc = (unsigned long)check_task_calltrace;
	} else {
		frame.fp = thread_saved_fp(t);
		frame.sp = thread_saved_sp(t);
@@ -309,41 +106,66 @@ static int do_check_calltrace(struct walk_stackframe_args *args,
		show_stack(t, NULL, KERN_INFO);
		return args->ret;
	}
	return 0;
}

static int do_check_calltrace(struct walk_stackframe_args *args,
			      int (*fn)(struct stackframe *, void *))
{
	int ret;
	struct task_struct *g, *t;
	unsigned int cpu;

	for_each_process_thread(g, t) {
		if (klp_is_migration_thread(t->comm))
			continue;
		ret = check_task_calltrace(t, args, fn);
		if (ret)
			return ret;
	}
	for_each_online_cpu(cpu) {
		ret = check_task_calltrace(idle_task(cpu), args, fn);
		if (ret)
			return ret;
	}
	return 0;
}

int klp_check_calltrace(struct klp_patch *patch, int enable)
#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE
int arch_klp_check_task_calltrace(struct task_struct *t,
				  bool (*check_func)(void *, int *, unsigned long),
				  void *data)
{
	int ret = 0;
	struct klp_func_list *check_funcs = NULL;
	struct walk_stackframe_args args = {
		.enable = enable,
		.ret = 0
		.data = data,
		.ret = 0,
		.check_func = check_func,
	};

	ret = klp_check_activeness_func(patch, enable, &check_funcs);
	if (ret) {
		pr_err("collect active functions failed, ret=%d\n", ret);
		goto out;
	if (t == NULL)
		return -EINVAL;
	return check_task_calltrace(t, &args, klp_check_jump_func);
}
	if (!check_funcs)
		goto out;
#endif

	args.check_funcs = check_funcs;
	ret = do_check_calltrace(&args, klp_check_jump_func);
int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data)
{
	struct walk_stackframe_args args = {
		.data = data,
		.ret = 0,
		.check_func = check_func,
	};

out:
	free_list(&check_funcs);
	return ret;
	return do_check_calltrace(&args, klp_check_jump_func);
}

static int check_module_calltrace(struct stackframe *frame, void *data)
static int check_module_calltrace(struct stackframe *frame, void *ws_args)
{
	struct walk_stackframe_args *args = data;
	struct walk_stackframe_args *args = ws_args;
	struct module *mod = args->data;

	if (within_module_core(frame->pc, args->mod)) {
		pr_err("module %s is in use!\n", args->mod->name);
	if (within_module_core(frame->pc, mod)) {
		pr_err("module %s is in use!\n", mod->name);
		return (args->ret = -EBUSY);
	}
	return 0;
@@ -352,7 +174,7 @@ static int check_module_calltrace(struct stackframe *frame, void *data)
int arch_klp_module_check_calltrace(void *data)
{
	struct walk_stackframe_args args = {
		.mod = (struct module *)data,
		.data = data,
		.ret = 0
	};

@@ -436,14 +258,29 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func)
	return ret;
}

static void klp_patch_text(u32 *dst, const u32 *src, int len)
{
	int i;

	if (len <= 0)
		return;
	/* skip breakpoint at first */
	for (i = 1; i < len; i++)
		__patch_text(dst + i, src[i]);
	/*
	 * Avoid compiler optimization; make sure that all instructions
	 * except the first breakpoint have been patched.
	 */
	barrier();
	__patch_text(dst, src[0]);
}
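
klp_patch_text() deliberately writes slot 0 last: under the breakpoint-based
transition (CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) that slot already
holds a trap while patching is in flight, so no CPU can fall through a
half-written jump sequence. A rough sketch of the assumed enable flow, pieced
together from the hooks declared in the headers above (demo_enable_func is
hypothetical, not code from this series):

	static int demo_enable_func(struct klp_func *func,
				    struct arch_klp_data *arch_data)
	{
		int ret;

		/* 1. Divert concurrent callers: trap on the first instruction. */
		ret = arch_klp_add_breakpoint(arch_data, func->old_func);
		if (ret)
			return ret;
		/*
		 * 2. arch_klp_patch_func() -> do_patch() -> klp_patch_text()
		 *    writes the jump sequence back to front, so slot 0 (still
		 *    holding the breakpoint) is the last word replaced.
		 */
		ret = arch_klp_patch_func(func);
		/* 3. On failure, restore the original first instruction. */
		if (ret)
			arch_klp_remove_breakpoint(arch_data, func->old_func);
		return ret;
	}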

static int do_patch(unsigned long pc, unsigned long new_addr)
{
	u32 insns[LJMP_INSN_SIZE];

	if (!offset_in_range(pc, new_addr, SZ_32M)) {
#ifdef CONFIG_ARM_MODULE_PLTS
		int i;

		/*
		 * [0] LDR PC, [PC+8]
		 * [4] nop
@@ -453,8 +290,7 @@ static int do_patch(unsigned long pc, unsigned long new_addr)
		insns[1] = __opcode_to_mem_arm(0xe320f000);
		insns[2] = new_addr;

		for (i = 0; i < LJMP_INSN_SIZE; i++)
			__patch_text(((u32 *)pc) + i, insns[i]);
		klp_patch_text((u32 *)pc, insns, LJMP_INSN_SIZE);
#else
		/*
		 * When offset from 'new_addr' to 'pc' is out of SZ_32M range but
@@ -465,7 +301,7 @@ static int do_patch(unsigned long pc, unsigned long new_addr)
#endif
	} else {
		insns[0] = arm_gen_branch(pc, new_addr);
		__patch_text((void *)pc, insns[0]);
		klp_patch_text((u32 *)pc, insns, 1);
	}
	return 0;
}
@@ -493,11 +329,7 @@ void arch_klp_unpatch_func(struct klp_func *func)
	pc = (unsigned long)func_node->old_func;
	list_del_rcu(&func->stack_node);
	if (list_empty(&func_node->func_stack)) {
		int i;

		for (i = 0; i < LJMP_INSN_SIZE; i++) {
			__patch_text(((u32 *)pc) + i, func_node->arch_data.old_insns[i]);
		}
		klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, LJMP_INSN_SIZE);
	} else {
		next_func = list_first_or_null_rcu(&func_node->func_stack,
					struct klp_func, stack_node);
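
With that, the arm side reduces to thin stack walkers around a caller-supplied
predicate. The callback contract, inferred from klp_check_jump_func() and
check_module_calltrace() above, is: return true to keep walking, return false
with *ret set to abort the transition. A hypothetical callback for
illustration (demo_range and demo_check_func are made-up names, not part of
the series):

	struct demo_range {
		unsigned long addr;
		unsigned long size;
		const char *name;
	};

	static bool demo_check_func(void *data, int *ret, unsigned long pc)
	{
		struct demo_range *r = data;

		if (pc >= r->addr && pc < r->addr + r->size) {
			pr_err("task is running in %s\n", r->name);
			*ret = -EBUSY;
			return false;	/* stop walking, fail the check */
		}
		return true;		/* keep walking this task's stack */
	}

	/* e.g.: ret = arch_klp_check_calltrace(demo_check_func, &range); */
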
arch/arm64/configs/openeuler_defconfig: +2 −0
@@ -364,6 +364,8 @@ CONFIG_LIVEPATCH_WO_FTRACE=y
CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY=y
# CONFIG_LIVEPATCH_STACK is not set
CONFIG_LIVEPATCH_RESTRICT_KPROBE=y
# CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE is not set
CONFIG_LIVEPATCH_ISOLATE_KPROBE=y
# end of Enable Livepatch

#
arch/arm64/include/asm/livepatch.h: +0 −4
@@ -41,9 +41,6 @@ static inline int klp_check_compiler_support(void)

int arch_klp_patch_func(struct klp_func *func);
void arch_klp_unpatch_func(struct klp_func *func);
#ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
int klp_check_calltrace(struct klp_patch *patch, int enable);
#endif
#else
#error Live patching support is disabled; check CONFIG_LIVEPATCH
#endif
@@ -72,7 +69,6 @@ int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func);
void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func);
long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func);
int arch_klp_module_check_calltrace(void *data);

#endif

#endif /* _ASM_ARM64_LIVEPATCH_H */
arch/arm64/kernel/livepatch.c: +99 −261
@@ -35,7 +35,6 @@
#include <linux/sched/debug.h>
#include <linux/kallsyms.h>

#define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * sizeof(u32))
#define CHECK_JUMP_RANGE LJMP_INSN_SIZE

static inline bool offset_in_range(unsigned long pc, unsigned long addr,
@@ -57,230 +56,32 @@ static inline bool offset_in_range(unsigned long pc, unsigned long addr,
		((le32_to_cpu(insn) & 0xfc000000) == 0x94000000) || \
		((le32_to_cpu(insn) & 0xfefff800) == 0xd63f0800))

struct klp_func_list {
	struct klp_func_list *next;
	unsigned long func_addr;
	unsigned long func_size;
	const char *func_name;
	int force;
};

struct walk_stackframe_args {
	int enable;
	struct klp_func_list *check_funcs;
	struct module *mod;
	int ret;
};

static inline unsigned long klp_size_to_check(unsigned long func_size,
		int force)
{
	unsigned long size = func_size;

	if (force == KLP_STACK_OPTIMIZE && size > MAX_SIZE_TO_CHECK)
		size = MAX_SIZE_TO_CHECK;
	return size;
}

static bool check_jump_insn(unsigned long func_addr)
bool arch_check_jump_insn(unsigned long func_addr)
{
	unsigned long i;
	u32 *insn = (u32*)func_addr;

	for (i = 0; i < CHECK_JUMP_RANGE; i++) {
		if (is_jump_insn(*insn)) {
		if (is_jump_insn(*insn))
			return true;
		}
		insn++;
	}
	return false;
}

static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func,
		unsigned long func_addr, unsigned long func_size, const char *func_name,
		int force)
{
	if (*func == NULL) {
		*funcs = (struct klp_func_list *)kzalloc(sizeof(**funcs), GFP_ATOMIC);
		if (!(*funcs))
			return -ENOMEM;
		*func = *funcs;
	} else {
		(*func)->next = (struct klp_func_list *)kzalloc(sizeof(**funcs),
				GFP_ATOMIC);
		if (!(*func)->next)
			return -ENOMEM;
		*func = (*func)->next;
	}
	(*func)->func_addr = func_addr;
	(*func)->func_size = func_size;
	(*func)->func_name = func_name;
	(*func)->force = force;
	(*func)->next = NULL;
	return 0;
}

static int klp_check_activeness_func(struct klp_patch *patch, int enable,
		struct klp_func_list **check_funcs)
static bool klp_check_jump_func(void *ws_args, unsigned long pc)
{
	int ret;
	struct klp_object *obj;
	struct klp_func *func;
	unsigned long func_addr = 0;
	unsigned long func_size;
	struct klp_func_node *func_node;
	struct klp_func_list *pcheck = NULL;

	for (obj = patch->objs; obj->funcs; obj++) {
		for (func = obj->funcs; func->old_name; func++) {
			unsigned long old_func = (unsigned long)func->old_func;

			if (enable) {
				bool need_check_old = false;

				if (func->patched || func->force == KLP_ENFORCEMENT)
					continue;
				/*
				 * When enable, checking the currently
				 * active functions.
				 */
				func_node = klp_find_func_node(func->old_func);
				if (!func_node ||
				    list_empty(&func_node->func_stack)) {
					func_addr = old_func;
					func_size = func->old_size;
				} else {
					/*
					 * Previously patched function
					 * [the active one]
					 */
					struct klp_func *prev;

					prev = list_first_or_null_rcu(
						&func_node->func_stack,
						struct klp_func, stack_node);
					func_addr = (unsigned long)prev->new_func;
					func_size = prev->new_size;
				}
				/*
				 * When preemption is disabled and the
				 * replacement area does not contain a jump
				 * instruction, the migration thread is
				 * scheduled to run stop machine only after the
				 * execution of instructions to be replaced is
				 * complete.
				 */
				if (IS_ENABLED(CONFIG_PREEMPTION) ||
				    (func->force == KLP_NORMAL_FORCE) ||
				    check_jump_insn(func_addr)) {
					ret = add_func_to_list(check_funcs, &pcheck,
							func_addr, func_size,
							func->old_name, func->force);
					if (ret)
						return ret;
					need_check_old = (func_addr != old_func);
				}
				if (need_check_old) {
					ret = add_func_to_list(check_funcs, &pcheck, old_func,
						func->old_size, func->old_name, func->force);
					if (ret)
						return ret;
				}
			} else {
				/*
				 * When disable, check for the previously
				 * patched function and the function itself
				 * which to be unpatched.
				 */
				func_node = klp_find_func_node(func->old_func);
				if (!func_node) {
					return -EINVAL;
				}
#ifdef CONFIG_PREEMPTION
				/*
				 * No scheduling point in the replacement
				 * instructions. Therefore, when preemption is
				 * not enabled, atomic execution is performed
				 * and these instructions will not appear on
				 * the stack.
				 */
				if (list_is_singular(&func_node->func_stack)) {
					func_addr = old_func;
					func_size = func->old_size;
				} else {
					struct klp_func *prev;
	struct walk_stackframe_args *args = ws_args;

					prev = list_first_or_null_rcu(
						&func_node->func_stack,
						struct klp_func, stack_node);
					func_addr = (unsigned long)prev->new_func;
					func_size = prev->new_size;
	return args->check_func(args->data, &args->ret, pc);
}
				ret = add_func_to_list(check_funcs, &pcheck,
						func_addr, func_size,
						func->old_name, 0);
				if (ret)
					return ret;
				if (func_addr != old_func) {
					ret = add_func_to_list(check_funcs, &pcheck, old_func,
						func->old_size, func->old_name, 0);
					if (ret)
						return ret;
				}
#endif

				func_addr = (unsigned long)func->new_func;
				func_size = func->new_size;
				ret = add_func_to_list(check_funcs, &pcheck,
						func_addr, func_size,
						func->old_name, 0);
				if (ret)
					return ret;
			}
		}
	}
	return 0;
}

static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc)
{
	while (funcs != NULL) {
		*ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name,
				klp_size_to_check(funcs->func_size, funcs->force));
		if (*ret) {
			return false;
		}
		funcs = funcs->next;
	}
	return true;
}

static bool klp_check_jump_func(void *data, unsigned long pc)
{
	struct walk_stackframe_args *args = data;
	struct klp_func_list *check_funcs = args->check_funcs;

	return check_func_list(check_funcs, &args->ret, pc);
}

static void free_list(struct klp_func_list **funcs)
{
	struct klp_func_list *p;

	while (*funcs != NULL) {
		p = *funcs;
		*funcs = (*funcs)->next;
		kfree(p);
	}
}

static int do_check_calltrace(struct walk_stackframe_args *args,
static int check_task_calltrace(struct task_struct *t,
				struct walk_stackframe_args *args,
				bool (*fn)(void *, unsigned long))
{
	struct task_struct *g, *t;
	struct stackframe frame;

	for_each_process_thread(g, t) {
	/*
	 * Handle the current task carefully on each CPU; we shouldn't
	 * use saved FP and PC when backtracing current. It's difficult
@@ -291,9 +92,7 @@ static int do_check_calltrace(struct walk_stackframe_args *args,
	if (t == current) {
		/* current on this CPU */
		frame.fp = (unsigned long)__builtin_frame_address(0);
			frame.pc = (unsigned long)do_check_calltrace;
		} else if (klp_is_migration_thread(t->comm)) {
			continue;
		frame.pc = (unsigned long)check_task_calltrace;
	} else {
		frame.fp = thread_saved_fp(t);
		frame.pc = thread_saved_pc(t);
@@ -305,40 +104,66 @@ static int do_check_calltrace(struct walk_stackframe_args *args,
		show_stack(t, NULL, KERN_INFO);
		return args->ret;
	}
	return 0;
}

static int do_check_calltrace(struct walk_stackframe_args *args,
			      bool (*fn)(void *, unsigned long))
{
	int ret;
	struct task_struct *g, *t;
	unsigned int cpu;

	for_each_process_thread(g, t) {
		if (klp_is_migration_thread(t->comm))
			continue;
		ret = check_task_calltrace(t, args, fn);
		if (ret)
			return ret;
	}
	for_each_online_cpu(cpu) {
		ret = check_task_calltrace(idle_task(cpu), args, fn);
		if (ret)
			return ret;
	}
	return 0;
}

int klp_check_calltrace(struct klp_patch *patch, int enable)
#ifdef CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE
int arch_klp_check_task_calltrace(struct task_struct *t,
				  bool (*check_func)(void *, int *, unsigned long),
				  void *data)
{
	int ret = 0;
	struct klp_func_list *check_funcs = NULL;
	struct walk_stackframe_args args = {
		.enable = enable,
		.ret = 0
		.data = data,
		.ret = 0,
		.check_func = check_func,
	};

	ret = klp_check_activeness_func(patch, enable, &check_funcs);
	if (ret) {
		pr_err("collect active functions failed, ret=%d\n", ret);
		goto out;
	if (t == NULL)
		return -EINVAL;
	return check_task_calltrace(t, &args, klp_check_jump_func);
}
	if (!check_funcs)
		goto out;
#endif

	args.check_funcs = check_funcs;
	ret = do_check_calltrace(&args, klp_check_jump_func);
out:
	free_list(&check_funcs);
	return ret;
int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data)
{
	struct walk_stackframe_args args = {
		.data = data,
		.ret = 0,
		.check_func = check_func,
	};

	return do_check_calltrace(&args, klp_check_jump_func);
}

static bool check_module_calltrace(void *data, unsigned long pc)
static bool check_module_calltrace(void *ws_args, unsigned long pc)
{
	struct walk_stackframe_args *args = data;
	struct walk_stackframe_args *args = ws_args;
	struct module *mod = args->data;

	if (within_module_core(pc, args->mod)) {
		pr_err("module %s is in use!\n", args->mod->name);
	if (within_module_core(pc, mod)) {
		pr_err("module %s is in use!\n", mod->name);
		args->ret = -EBUSY;
		return false;
	}
@@ -348,7 +173,7 @@ static bool check_module_calltrace(void *data, unsigned long pc)
int arch_klp_module_check_calltrace(void *data)
{
	struct walk_stackframe_args args = {
		.mod = (struct module *)data,
		.data = data,
		.ret = 0
	};

@@ -410,6 +235,27 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func)
	return ret;
}

static int klp_patch_text(u32 *dst, const u32 *src, int len)
{
	int i;
	int ret;

	if (len <= 0)
		return -EINVAL;
	/* skip breakpoint at first */
	for (i = 1; i < len; i++) {
		ret = aarch64_insn_patch_text_nosync(dst + i, src[i]);
		if (ret)
			return ret;
	}
	/*
	 * Avoid compiler optimization; make sure that all instructions
	 * except the first breakpoint have been patched.
	 */
	barrier();
	return aarch64_insn_patch_text_nosync(dst, src[0]);
}

static int do_patch(unsigned long pc, unsigned long new_addr)
{
	u32 insns[LJMP_INSN_SIZE];
@@ -418,27 +264,23 @@ static int do_patch(unsigned long pc, unsigned long new_addr)
	if (offset_in_range(pc, new_addr, SZ_128M)) {
		insns[0] = aarch64_insn_gen_branch_imm(pc, new_addr,
						       AARCH64_INSN_BRANCH_NOLINK);
		ret = aarch64_insn_patch_text_nosync((void *)pc, insns[0]);
		ret = klp_patch_text((u32 *)pc, insns, 1);
		if (ret) {
			pr_err("patch instruction small range failed, ret=%d\n", ret);
			return -EPERM;
		}
	} else {
#ifdef CONFIG_ARM64_MODULE_PLTS
		int i;

		insns[0] = 0x92800010 | (((~new_addr) & 0xffff)) << 5;
		insns[1] = 0xf2a00010 | (((new_addr >> 16) & 0xffff)) << 5;
		insns[2] = 0xf2c00010 | (((new_addr >> 32) & 0xffff)) << 5;
		insns[3] = 0xd61f0200;
		for (i = 0; i < LJMP_INSN_SIZE; i++) {
			ret = aarch64_insn_patch_text_nosync(((u32 *)pc) + i, insns[i]);
		ret = klp_patch_text((u32 *)pc, insns, LJMP_INSN_SIZE);
		if (ret) {
				pr_err("patch instruction %d large range failed, ret=%d\n",
				       i, ret);
			pr_err("patch instruction large range failed, ret=%d\n", ret);
			return -EPERM;
		}
		}
#else
		/*
		 * When offset from 'new_addr' to 'pc' is out of SZ_128M range but
@@ -469,21 +311,17 @@ void arch_klp_unpatch_func(struct klp_func *func)
	struct klp_func_node *func_node;
	struct klp_func *next_func;
	unsigned long pc;
	int i;
	int ret;

	func_node = func->func_node;
	pc = (unsigned long)func_node->old_func;
	list_del_rcu(&func->stack_node);
	if (list_empty(&func_node->func_stack)) {
		for (i = 0; i < LJMP_INSN_SIZE; i++) {
			ret = aarch64_insn_patch_text_nosync(((u32 *)pc) + i,
							     func_node->arch_data.old_insns[i]);
		ret = klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, LJMP_INSN_SIZE);
		if (ret) {
				pr_err("restore instruction %d failed, ret=%d\n", i, ret);
			pr_err("restore instruction failed, ret=%d\n", ret);
			return;
		}
		}
	} else {
		next_func = list_first_or_null_rcu(&func_node->func_stack,
					struct klp_func, stack_node);