Commit a602285a authored by Linus Torvalds

Merge branch 'per_signal_struct_coredumps-for-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace

Pull per signal_struct coredumps from Eric Biederman:
 "Current coredumps are mixed up with the exit code, the signal handling
  code, and the ptrace code making coredumps much more complicated than
  necessary and difficult to follow.

  This series of changes starts with ptrace_stop and cleans it up,
  making it easier to follow what is happening in ptrace_stop. It then
  cleans up the exec interactions with coredumps, and then the coredump
  interactions with exit. Finally, the coredump interactions with the
  signal handling code are cleaned up.

  The first and last changes are bug fixes for minor bugs.

  I believe the fact that vfork followed by execve can kill the process
  that called vfork if exec fails is sufficient justification to change
  the userspace visible behavior.

  In previous discussions, some of these changes were organized
  differently and individually appeared to make the code base worse. As
  currently written, I believe they all stand on their own as cleanups
  and bug fixes.

  Which means that even if the worst should happen and the last change
  needs to be reverted for some unimaginable reason, the code base will
  still be improved.

  If the worst does not happen there are more cleanups that can be
  made. Signals that generate coredumps can easily become eligible for
  short circuit delivery in complete_signal. The entire rendezvous for
  generating a coredump can move into get_signal. The function
  force_sig_info_to_task can be written in a way that does not modify
  the signal handling state of the target task (because coredumps are
  eligible for short circuit delivery). Many of these future cleanups
  can be done another way, but nothing so cleanly as if coredumps become
  per signal_struct"

* 'per_signal_struct_coredumps-for-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace:
  coredump: Limit coredumps to a single thread group
  coredump: Don't perform any cleanups before dumping core
  exit: Factor coredump_exit_mm out of exit_mm
  exec: Check for a pending fatal signal instead of core_state
  ptrace: Remove the unnecessary arguments from arch_ptrace_stop
  signal: Remove the bogus sigkill_pending in ptrace_stop
parents 5c4e0a21 3f66f86b
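
The diffs below show the merge's net effect. The central move, given
here as a simplified orientation sketch rather than the real kernel
definitions (both structures carry many more fields), is that the
coredump rendezvous state leaves the mm, which can be shared between
processes via vfork or clone(CLONE_VM), and becomes per thread group:

/* Simplified orientation sketch; not the real kernel definitions. */
struct core_state;	/* rendezvous state for threads joining a dump */

struct mm_struct {
	/* ... */
	struct core_state *core_state;	/* before: per mm, visible to every
					 * process sharing this mm */
};

struct signal_struct {
	/* ... */
	struct core_state *core_state;	/* after: per thread group, so only
					 * the group that is actually dumping
					 * participates in the rendezvous */
};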
+2 −2
@@ -134,9 +134,9 @@ static inline long regs_return_value(struct pt_regs *regs)
 extern void ia64_decrement_ip (struct pt_regs *pt);
 
 extern void ia64_ptrace_stop(void);
-#define arch_ptrace_stop(code, info) \
+#define arch_ptrace_stop() \
 	ia64_ptrace_stop()
-#define arch_ptrace_stop_needed(code, info) \
+#define arch_ptrace_stop_needed() \
 	(!test_thread_flag(TIF_RESTORE_RSE))
 
 extern void ptrace_attach_sync_user_rbs (struct task_struct *);
+4 −4
@@ -26,12 +26,12 @@ static inline bool pt_regs_clear_syscall(struct pt_regs *regs)
 	return (regs->tstate &= ~TSTATE_SYSCALL);
 }
 
-#define arch_ptrace_stop_needed(exit_code, info) \
+#define arch_ptrace_stop_needed() \
 ({	flush_user_windows(); \
 	get_thread_wsaved() != 0; \
 })
 
-#define arch_ptrace_stop(exit_code, info) \
+#define arch_ptrace_stop() \
 	synchronize_user_stack()
 
 #define current_pt_regs() \
@@ -129,12 +129,12 @@ static inline bool pt_regs_clear_syscall(struct pt_regs *regs)
 	return (regs->psr &= ~PSR_SYSCALL);
 }
 
-#define arch_ptrace_stop_needed(exit_code, info) \
+#define arch_ptrace_stop_needed() \
 ({	flush_user_windows(); \
 	current_thread_info()->w_saved != 0;	\
 })
 
-#define arch_ptrace_stop(exit_code, info) \
+#define arch_ptrace_stop() \
 	synchronize_user_stack()
 
 #define current_pt_regs() \
+2 −2
@@ -1834,7 +1834,7 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 	/*
 	 * Allocate a structure for each thread.
 	 */
-	for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
+	for (ct = &dump_task->signal->core_state->dumper; ct; ct = ct->next) {
 		t = kzalloc(offsetof(struct elf_thread_core_info,
 				     notes[info->thread_notes]),
 			    GFP_KERNEL);
@@ -2024,7 +2024,7 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 	if (!elf_note_info_init(info))
 		return 0;
 
-	for (ct = current->mm->core_state->dumper.next;
+	for (ct = current->signal->core_state->dumper.next;
 					ct; ct = ct->next) {
 		ets = kzalloc(sizeof(*ets), GFP_KERNEL);
 		if (!ets)
+1 −1
@@ -1494,7 +1494,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
 	if (dump_vma_snapshot(cprm, &vma_count, &vma_meta, &vma_data_size))
 		goto end_coredump;
 
-	for (ct = current->mm->core_state->dumper.next;
+	for (ct = current->signal->core_state->dumper.next;
 					ct; ct = ct->next) {
 		tmp = elf_dump_thread_status(cprm->siginfo->si_signo,
 					     ct->task, &thread_status_size);
+11 −77
@@ -359,7 +359,7 @@ static int zap_process(struct task_struct *start, int exit_code, int flags)
 
 	for_each_thread(start, t) {
 		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
-		if (t != current && t->mm) {
+		if (t != current && !(t->flags & PF_POSTCOREDUMP)) {
 			sigaddset(&t->pending.signal, SIGKILL);
 			signal_wake_up(t, 1);
 			nr++;
@@ -369,99 +369,34 @@ static int zap_process(struct task_struct *start, int exit_code, int flags)
 	return nr;
 }
 
-static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
+static int zap_threads(struct task_struct *tsk,
 			struct core_state *core_state, int exit_code)
 {
-	struct task_struct *g, *p;
-	unsigned long flags;
 	int nr = -EAGAIN;
 
 	spin_lock_irq(&tsk->sighand->siglock);
 	if (!signal_group_exit(tsk->signal)) {
-		mm->core_state = core_state;
+		tsk->signal->core_state = core_state;
 		tsk->signal->group_exit_task = tsk;
 		nr = zap_process(tsk, exit_code, 0);
 		clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
-	}
-	spin_unlock_irq(&tsk->sighand->siglock);
-	if (unlikely(nr < 0))
-		return nr;
-
-	tsk->flags |= PF_DUMPCORE;
-	if (atomic_read(&mm->mm_users) == nr + 1)
-		goto done;
-	/*
-	 * We should find and kill all tasks which use this mm, and we should
-	 * count them correctly into ->nr_threads. We don't take tasklist
-	 * lock, but this is safe wrt:
-	 *
-	 * fork:
-	 *	None of sub-threads can fork after zap_process(leader). All
-	 *	processes which were created before this point should be
-	 *	visible to zap_threads() because copy_process() adds the new
-	 *	process to the tail of init_task.tasks list, and lock/unlock
-	 *	of ->siglock provides a memory barrier.
-	 *
-	 * do_exit:
-	 *	The caller holds mm->mmap_lock. This means that the task which
-	 *	uses this mm can't pass exit_mm(), so it can't exit or clear
-	 *	its ->mm.
-	 *
-	 * de_thread:
-	 *	It does list_replace_rcu(&leader->tasks, &current->tasks),
-	 *	we must see either old or new leader, this does not matter.
-	 *	However, it can change p->sighand, so lock_task_sighand(p)
-	 *	must be used. Since p->mm != NULL and we hold ->mmap_lock
-	 *	it can't fail.
-	 *
-	 *	Note also that "g" can be the old leader with ->mm == NULL
-	 *	and already unhashed and thus removed from ->thread_group.
-	 *	This is OK, __unhash_process()->list_del_rcu() does not
-	 *	clear the ->next pointer, we will find the new leader via
-	 *	next_thread().
-	 */
-	rcu_read_lock();
-	for_each_process(g) {
-		if (g == tsk->group_leader)
-			continue;
-		if (g->flags & PF_KTHREAD)
-			continue;
-
-		for_each_thread(g, p) {
-			if (unlikely(!p->mm))
-				continue;
-			if (unlikely(p->mm == mm)) {
-				lock_task_sighand(p, &flags);
-				nr += zap_process(p, exit_code,
-							SIGNAL_GROUP_EXIT);
-				unlock_task_sighand(p, &flags);
-			}
-			break;
-		}
-	}
-	rcu_read_unlock();
-done:
-	atomic_set(&core_state->nr_threads, nr);
+		tsk->flags |= PF_DUMPCORE;
+		atomic_set(&core_state->nr_threads, nr);
+	}
+	spin_unlock_irq(&tsk->sighand->siglock);
 	return nr;
 }
 
 static int coredump_wait(int exit_code, struct core_state *core_state)
 {
 	struct task_struct *tsk = current;
-	struct mm_struct *mm = tsk->mm;
 	int core_waiters = -EBUSY;
 
 	init_completion(&core_state->startup);
 	core_state->dumper.task = tsk;
 	core_state->dumper.next = NULL;
 
-	if (mmap_write_lock_killable(mm))
-		return -EINTR;
-
-	if (!mm->core_state)
-		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
-	mmap_write_unlock(mm);
-
+	core_waiters = zap_threads(tsk, core_state, exit_code);
 	if (core_waiters > 0) {
 		struct core_thread *ptr;
 
@@ -483,7 +418,7 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
 	return core_waiters;
 }
 
-static void coredump_finish(struct mm_struct *mm, bool core_dumped)
+static void coredump_finish(bool core_dumped)
 {
 	struct core_thread *curr, *next;
 	struct task_struct *task;
@@ -493,22 +428,21 @@ static void coredump_finish(struct mm_struct *mm, bool core_dumped)
 		current->signal->group_exit_code |= 0x80;
 	current->signal->group_exit_task = NULL;
 	current->signal->flags = SIGNAL_GROUP_EXIT;
+	next = current->signal->core_state->dumper.next;
+	current->signal->core_state = NULL;
 	spin_unlock_irq(&current->sighand->siglock);
 
-	next = mm->core_state->dumper.next;
 	while ((curr = next) != NULL) {
 		next = curr->next;
 		task = curr->task;
 		/*
-		 * see exit_mm(), curr->task must not see
+		 * see coredump_task_exit(), curr->task must not see
 		 * ->task == NULL before we read ->next.
 		 */
 		smp_mb();
 		curr->task = NULL;
 		wake_up_process(task);
 	}
-
-	mm->core_state = NULL;
 }
 
 static bool dump_interrupted(void)
@@ -839,7 +773,7 @@ void do_coredump(const kernel_siginfo_t *siginfo)
 fail_unlock:
 	kfree(argv);
 	kfree(cn.corename);
-	coredump_finish(mm, core_dumped);
+	coredump_finish(core_dumped);
 	revert_creds(old_cred);
 fail_creds:
 	put_cred(cred);