Commit 2500ad1c authored by Eric W. Biederman's avatar Eric W. Biederman
Browse files

ptrace: Don't change __state



Stop playing with tsk->__state to remove TASK_WAKEKILL while a ptrace
command is executing.

Instead remove TASK_WAKEKILL from the definition of TASK_TRACED, and
implement a new jobctl flag JOBCTL_PTRACE_FROZEN.  This new flag is set
in jobctl_freeze_task and cleared when ptrace_stop is awoken or in
jobctl_unfreeze_task (when ptrace_stop remains asleep).

In signal_wake_up add __TASK_TRACED to state along with TASK_WAKEKILL
when the wake up is for a fatal signal.  Skip the fatal wake up (and
thus __TASK_TRACED) when JOBCTL_PTRACE_FROZEN is set.  This has the
same effect as changing TASK_TRACED to __TASK_TRACED as all of the
wake_ups that use TASK_KILLABLE go through signal_wake_up.

Handle a ptrace_stop being called with a pending fatal signal.
Previously it would have been handled by schedule simply failing to
sleep.  As TASK_WAKEKILL is no longer part of TASK_TRACED, schedule
will sleep with a fatal_signal_pending.  The code in signal_wake_up
guarantees that the task will be awakened by any fatal signal that
comes after TASK_TRACED is set.

Previously the __state value of __TASK_TRACED was changed to
TASK_RUNNING when woken up or back to TASK_TRACED when the code was
left in ptrace_stop.  Now ptrace_stop clears JOBCTL_PTRACE_FROZEN when
woken up, and ptrace_unfreeze_traced clears JOBCTL_PTRACE_FROZEN when
the task is left sleeping.

Tested-by: default avatarKees Cook <keescook@chromium.org>
Reviewed-by: default avatarOleg Nesterov <oleg@redhat.com>
Link: https://lkml.kernel.org/r/20220505182645.497868-10-ebiederm@xmission.com


Signed-off-by: default avatar"Eric W. Biederman" <ebiederm@xmission.com>
parent 57b6de08
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -103,7 +103,7 @@ struct task_group;
/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)
#define TASK_TRACED			__TASK_TRACED

#define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

+2 −0
Original line number Diff line number Diff line
@@ -19,6 +19,7 @@ struct task_struct;
#define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
#define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
#define JOBCTL_TRAP_FREEZE_BIT	23	/* trap for cgroup freezer */
#define JOBCTL_PTRACE_FROZEN_BIT	24	/* frozen for ptrace */

#define JOBCTL_STOP_DEQUEUED	(1UL << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING	(1UL << JOBCTL_STOP_PENDING_BIT)
@@ -28,6 +29,7 @@ struct task_struct;
#define JOBCTL_TRAPPING		(1UL << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING	(1UL << JOBCTL_LISTENING_BIT)
#define JOBCTL_TRAP_FREEZE	(1UL << JOBCTL_TRAP_FREEZE_BIT)
#define JOBCTL_PTRACE_FROZEN	(1UL << JOBCTL_PTRACE_FROZEN_BIT)

#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
+3 −2
Original line number Diff line number Diff line
@@ -435,9 +435,10 @@ extern void calculate_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
static inline void signal_wake_up(struct task_struct *t, bool fatal)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
	fatal = fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN);
	signal_wake_up_state(t, fatal ? TASK_WAKEKILL | __TASK_TRACED : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
+8 −13
Original line number Diff line number Diff line
@@ -197,7 +197,7 @@ static bool ptrace_freeze_traced(struct task_struct *task)
	spin_lock_irq(&task->sighand->siglock);
	if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
	    !__fatal_signal_pending(task)) {
		WRITE_ONCE(task->__state, __TASK_TRACED);
		task->jobctl |= JOBCTL_PTRACE_FROZEN;
		ret = true;
	}
	spin_unlock_irq(&task->sighand->siglock);
@@ -207,23 +207,19 @@ static bool ptrace_freeze_traced(struct task_struct *task)

static void ptrace_unfreeze_traced(struct task_struct *task)
{
	if (READ_ONCE(task->__state) != __TASK_TRACED)
		return;

	WARN_ON(!task->ptrace || task->parent != current);
	unsigned long flags;

	/*
	 * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
	 * Recheck state under the lock to close this race.
	 * The child may be awake and may have cleared
	 * JOBCTL_PTRACE_FROZEN (see ptrace_resume).  The child will
	 * not set JOBCTL_PTRACE_FROZEN or enter __TASK_TRACED anew.
	 */
	spin_lock_irq(&task->sighand->siglock);
	if (READ_ONCE(task->__state) == __TASK_TRACED) {
	if (lock_task_sighand(task, &flags)) {
		task->jobctl &= ~JOBCTL_PTRACE_FROZEN;
		if (__fatal_signal_pending(task))
			wake_up_state(task, __TASK_TRACED);
		else
			WRITE_ONCE(task->__state, TASK_TRACED);
		unlock_task_sighand(task, &flags);
	}
	spin_unlock_irq(&task->sighand->siglock);
}

/**
@@ -256,7 +252,6 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
	 */
	read_lock(&tasklist_lock);
	if (child->ptrace && child->parent == current) {
		WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
		/*
		 * child->sighand can't be NULL, release_task()
		 * does ptrace_unlink() before __exit_signal().
+1 −4
Original line number Diff line number Diff line
@@ -6304,10 +6304,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)

	/*
	 * We must load prev->state once (task_struct::state is volatile), such
	 * that:
	 *
	 *  - we form a control dependency vs deactivate_task() below.
	 *  - ptrace_{,un}freeze_traced() can change ->state underneath us.
	 * that we form a control dependency vs deactivate_task() below.
	 */
	prev_state = READ_ONCE(prev->__state);
	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
Loading