sched,signal,ptrace: Rework TASK_TRACED, TASK_STOPPED state
author     Peter Zijlstra <peterz@infradead.org>
           Tue, 3 May 2022 20:57:47 +0000 (15:57 -0500)
committer  Eric W. Biederman <ebiederm@xmission.com>
           Wed, 11 May 2022 19:37:06 +0000 (14:37 -0500)
Currently ptrace_stop() / do_signal_stop() rely on the special states
TASK_TRACED and TASK_STOPPED respectively to keep unique state. That is,
this state exists only in task->__state and nowhere else.

There are two spots of bother with this:

 - PREEMPT_RT has task->saved_state which complicates matters,
   meaning task_is_{traced,stopped}() needs to check an additional
   variable.

 - An alternative freezer implementation that itself relies on a
   special TASK state would lose TASK_TRACED/TASK_STOPPED and would
   result in misbehaviour.

As such, add additional state to task->jobctl to track this state
outside of task->__state.

NOTE: this doesn't actually fix anything yet, just adds extra state.
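
Concretely, the new bits shadow the special __state values: they are set
under ->sighand->siglock right next to set_special_state(), and cleared on
every path that wakes the task back out of the stop. Roughly, as a condensed
sketch of the pattern (not a verbatim excerpt; see the hunks below):

	/* entering the stop, with ->sighand->siglock held */
	current->jobctl |= JOBCTL_TRACED;	/* or JOBCTL_STOPPED */
	set_special_state(TASK_TRACED);		/* or TASK_STOPPED */

	/* waking it back up, again under siglock */
	task->jobctl &= ~JOBCTL_TRACED;
	wake_up_state(task, __TASK_TRACED);

With that invariant, task_is_stopped()/task_is_traced() only need to look at
READ_ONCE(task->jobctl), which is unaffected by PREEMPT_RT's ->saved_state
and by any freezer that rewrites ->__state.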

--EWB
  * Didn't add an unnecessary newline in signal.h
  * Update t->jobctl in signal_wake_up and ptrace_signal_wake_up
    instead of in signal_wake_up_state.  This prevents the clearing
    of TASK_STOPPED and TASK_TRACED from getting lost.
  * Added warnings if JOBCTL_STOPPED or JOBCTL_TRACED are not cleared
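
The second point above is why the clearing ends up in the signal.h wrappers
themselves, under siglock (signal_wake_up_state() now asserts the lock is
held), immediately before the wake-up. For reference, the post-patch helpers
from the diff below read:

	static inline void signal_wake_up(struct task_struct *t, bool fatal)
	{
		unsigned int state = 0;
		if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) {
			t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED);
			state = TASK_WAKEKILL | __TASK_TRACED;
		}
		signal_wake_up_state(t, state);
	}

	static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
	{
		unsigned int state = 0;
		if (resume) {
			t->jobctl &= ~JOBCTL_TRACED;
			state = __TASK_TRACED;
		}
		signal_wake_up_state(t, state);
	}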

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220421150654.757693825@infradead.org
Tested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Link: https://lkml.kernel.org/r/20220505182645.497868-12-ebiederm@xmission.com
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
include/linux/sched.h
include/linux/sched/jobctl.h
include/linux/sched/signal.h
kernel/ptrace.c
kernel/signal.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 610f2fdb1e2caa13bdce84d59ed387688a9adc03..cbe5c899599c229f887bfe39931243362e07d442 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -118,11 +118,9 @@ struct task_group;
 
 #define task_is_running(task)          (READ_ONCE((task)->__state) == TASK_RUNNING)
 
-#define task_is_traced(task)           ((READ_ONCE(task->__state) & __TASK_TRACED) != 0)
-
-#define task_is_stopped(task)          ((READ_ONCE(task->__state) & __TASK_STOPPED) != 0)
-
-#define task_is_stopped_or_traced(task)        ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+#define task_is_traced(task)           ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
+#define task_is_stopped(task)          ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
+#define task_is_stopped_or_traced(task)        ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
 
 /*
  * Special states are those that do not use the normal wait-loop pattern. See
diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h
index d556c342596345d72bb8ebc56e6d67a1d120c11f..68876d0a7ef9a91bc9a20c03d959bf2f837cd68c 100644
--- a/include/linux/sched/jobctl.h
+++ b/include/linux/sched/jobctl.h
@@ -21,6 +21,9 @@ struct task_struct;
 #define JOBCTL_TRAP_FREEZE_BIT 23      /* trap for cgroup freezer */
 #define JOBCTL_PTRACE_FROZEN_BIT       24      /* frozen for ptrace */
 
+#define JOBCTL_STOPPED_BIT     26      /* do_signal_stop() */
+#define JOBCTL_TRACED_BIT      27      /* ptrace_stop() */
+
 #define JOBCTL_STOP_DEQUEUED   (1UL << JOBCTL_STOP_DEQUEUED_BIT)
 #define JOBCTL_STOP_PENDING    (1UL << JOBCTL_STOP_PENDING_BIT)
 #define JOBCTL_STOP_CONSUME    (1UL << JOBCTL_STOP_CONSUME_BIT)
@@ -31,6 +34,9 @@ struct task_struct;
 #define JOBCTL_TRAP_FREEZE     (1UL << JOBCTL_TRAP_FREEZE_BIT)
 #define JOBCTL_PTRACE_FROZEN   (1UL << JOBCTL_PTRACE_FROZEN_BIT)
 
+#define JOBCTL_STOPPED         (1UL << JOBCTL_STOPPED_BIT)
+#define JOBCTL_TRACED          (1UL << JOBCTL_TRACED_BIT)
+
 #define JOBCTL_TRAP_MASK       (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
 #define JOBCTL_PENDING_MASK    (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
 
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index e66948abbee46fab302a4375ab3a561277a4b04f..07ba3404fcde45a8d70a6901b3c176707b5633a5 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -294,8 +294,10 @@ static inline int kernel_dequeue_signal(void)
 static inline void kernel_signal_stop(void)
 {
        spin_lock_irq(&current->sighand->siglock);
-       if (current->jobctl & JOBCTL_STOP_DEQUEUED)
+       if (current->jobctl & JOBCTL_STOP_DEQUEUED) {
+               current->jobctl |= JOBCTL_STOPPED;
                set_special_state(TASK_STOPPED);
+       }
        spin_unlock_irq(&current->sighand->siglock);
 
        schedule();
@@ -437,12 +439,21 @@ extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
 
 static inline void signal_wake_up(struct task_struct *t, bool fatal)
 {
-       fatal = fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN);
-       signal_wake_up_state(t, fatal ? TASK_WAKEKILL | __TASK_TRACED : 0);
+       unsigned int state = 0;
+       if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) {
+               t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED);
+               state = TASK_WAKEKILL | __TASK_TRACED;
+       }
+       signal_wake_up_state(t, state);
 }
 static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
 {
-       signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
+       unsigned int state = 0;
+       if (resume) {
+               t->jobctl &= ~JOBCTL_TRACED;
+               state = __TASK_TRACED;
+       }
+       signal_wake_up_state(t, state);
 }
 
 void task_join_group_stop(struct task_struct *task);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 36a5b7a00d2f2295cb75e38c52e8045422ec213d..328a34a991248923d564a76e87d588e4562bb9ff 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -185,7 +185,12 @@ static bool looks_like_a_spurious_pid(struct task_struct *task)
        return true;
 }
 
-/* Ensure that nothing can wake it up, even SIGKILL */
+/*
+ * Ensure that nothing can wake it up, even SIGKILL
+ *
+ * A task is switched to this state while a ptrace operation is in progress;
+ * such that the ptrace operation is uninterruptible.
+ */
 static bool ptrace_freeze_traced(struct task_struct *task)
 {
        bool ret = false;
@@ -216,8 +221,10 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
         */
        if (lock_task_sighand(task, &flags)) {
                task->jobctl &= ~JOBCTL_PTRACE_FROZEN;
-               if (__fatal_signal_pending(task))
+               if (__fatal_signal_pending(task)) {
+                       task->jobctl &= ~JOBCTL_TRACED;
                        wake_up_state(task, __TASK_TRACED);
+               }
                unlock_task_sighand(task, &flags);
        }
 }
@@ -462,8 +469,10 @@ static int ptrace_attach(struct task_struct *task, long request,
         * in and out of STOPPED are protected by siglock.
         */
        if (task_is_stopped(task) &&
-           task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
+           task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) {
+               task->jobctl &= ~JOBCTL_STOPPED;
                signal_wake_up_state(task, __TASK_STOPPED);
+       }
 
        spin_unlock(&task->sighand->siglock);
 
@@ -875,6 +884,7 @@ static int ptrace_resume(struct task_struct *child, long request,
         */
        spin_lock_irq(&child->sighand->siglock);
        child->exit_code = data;
+       child->jobctl &= ~JOBCTL_TRACED;
        wake_up_state(child, __TASK_TRACED);
        spin_unlock_irq(&child->sighand->siglock);
 
diff --git a/kernel/signal.c b/kernel/signal.c
index a58b68a2d3c6c94022aaa73ca6a3ee6e2f00cb59..e782c2611b647c71208f7baf222741c6274d9df9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -762,7 +762,10 @@ still_pending:
  */
 void signal_wake_up_state(struct task_struct *t, unsigned int state)
 {
+       lockdep_assert_held(&t->sighand->siglock);
+
        set_tsk_thread_flag(t, TIF_SIGPENDING);
+
        /*
         * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
         * case. We don't check t->state here because there is a race with it
@@ -930,9 +933,10 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
                for_each_thread(p, t) {
                        flush_sigqueue_mask(&flush, &t->pending);
                        task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
-                       if (likely(!(t->ptrace & PT_SEIZED)))
+                       if (likely(!(t->ptrace & PT_SEIZED))) {
+                               t->jobctl &= ~JOBCTL_STOPPED;
                                wake_up_state(t, __TASK_STOPPED);
-                       else
+                       } else
                                ptrace_trap_notify(t);
                }
 
@@ -2218,6 +2222,7 @@ static int ptrace_stop(int exit_code, int why, unsigned long message,
                return exit_code;
 
        set_special_state(TASK_TRACED);
+       current->jobctl |= JOBCTL_TRACED;
 
        /*
         * We're committing to trapping.  TRACED should be visible before
@@ -2436,6 +2441,7 @@ static bool do_signal_stop(int signr)
                if (task_participate_group_stop(current))
                        notify = CLD_STOPPED;
 
+               current->jobctl |= JOBCTL_STOPPED;
                set_special_state(TASK_STOPPED);
                spin_unlock_irq(&current->sighand->siglock);