git.baikalelectronics.ru Git - kernel.git/commitdiff
signal: Requeue signals in the appropriate queue
author    Eric W. Biederman <ebiederm@xmission.com>
Mon, 15 Nov 2021 19:47:13 +0000 (13:47 -0600)
committer Eric W. Biederman <ebiederm@xmission.com>
Wed, 17 Nov 2021 16:39:12 +0000 (10:39 -0600)
In the event that a tracer changes which signal needs to be delivered
and that signal is currently blocked, the signal needs to be requeued
for later delivery.

With the advent of CLONE_THREAD the kernel has 2 signal queues per
task: the per process (shared) queue and the per task queue.  Update
the code so that a signal removed from the per process queue is
requeued on the per process queue, rather than unconditionally on the
per task queue.  This is necessary to make it appear the signal was
never dequeued.
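
As a rough userspace illustration (not part of this patch, and the
signal choice is arbitrary): process-directed signals such as those
sent with kill(2) are queued on the per process (shared) queue, while
thread-directed signals such as those sent with pthread_kill(3) are
queued on the per task queue, so a correct requeue has to remember
which of the two queues the signal was taken from.

  /* Illustration only: which queue each delivery style targets.
   * SIGUSR1 is blocked so both signals stay pending rather than
   * being delivered.  Compile with -pthread. */
  #include <pthread.h>
  #include <signal.h>
  #include <unistd.h>

  int main(void)
  {
          sigset_t set;

          sigemptyset(&set);
          sigaddset(&set, SIGUSR1);
          pthread_sigmask(SIG_BLOCK, &set, NULL);

          kill(getpid(), SIGUSR1);               /* per process (shared) queue */
          pthread_kill(pthread_self(), SIGUSR1); /* this task's private queue */
          return 0;
  }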

The rr debugger reasonably believes that the state of the process from
the last ptrace_stop it observed until PTRACE_EVENT_EXIT can be recreated
by simply letting the process run.  If a SIGKILL interrupts a ptrace_stop
this is not true today.

So return signals to their original queue in ptrace_signal so that
signals that are not delivered appear as if they were never dequeued.
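
For context, a hedged reproducer sketch of that scenario (not part of
this patch; error handling is omitted and the specific signals are
arbitrary): the tracer restarts a stopped tracee with a different
signal than the one that caused the stop, and because the tracee has
that signal blocked the kernel must requeue it, after this change onto
the queue the original signal was dequeued from.

  /* Sketch only, not from this patch: the tracer swaps the signal the
   * tracee stopped on (SIGTERM, taken from the shared queue) for
   * SIGUSR1, which the tracee has blocked, forcing a requeue. */
  #include <signal.h>
  #include <sys/ptrace.h>
  #include <sys/types.h>
  #include <sys/wait.h>
  #include <unistd.h>

  int main(void)
  {
          pid_t child = fork();

          if (child == 0) {
                  sigset_t set;

                  sigemptyset(&set);
                  sigaddset(&set, SIGUSR1);
                  sigprocmask(SIG_BLOCK, &set, NULL);  /* block SIGUSR1 */
                  ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                  raise(SIGSTOP);                      /* sync with tracer */
                  _exit(0);
          }

          waitpid(child, NULL, 0);              /* stopped on SIGSTOP */
          kill(child, SIGTERM);                 /* process-directed signal */
          ptrace(PTRACE_CONT, child, NULL, 0);  /* child now stops on SIGTERM */
          waitpid(child, NULL, 0);

          /* Restart with a different, blocked signal.  SIGUSR1 cannot be
           * delivered, so the kernel requeues it; with this change it is
           * returned to the queue SIGTERM was dequeued from. */
          ptrace(PTRACE_CONT, child, NULL, SIGUSR1);
          waitpid(child, NULL, 0);              /* reap the child's exit */
          return 0;
  }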

Fixes: 794aa320b79d ("[PATCH] sigfix-2.5.40-D6")
History Tree: https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lkml.kernel.org/r/87zgq4d5r4.fsf_-_@email.froward.int.ebiederm.org
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
fs/signalfd.c
include/linux/sched/signal.h
kernel/signal.c

index 040e1cf9052826ef08bf269aeb63d11c073d16b4..74f134cd1ff6b77d38b1a35aa7532010423a72c4 100644 (file)
@@ -165,11 +165,12 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
 static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info,
                                int nonblock)
 {
+       enum pid_type type;
        ssize_t ret;
        DECLARE_WAITQUEUE(wait, current);
 
        spin_lock_irq(&current->sighand->siglock);
-       ret = dequeue_signal(current, &ctx->sigmask, info);
+       ret = dequeue_signal(current, &ctx->sigmask, info, &type);
        switch (ret) {
        case 0:
                if (!nonblock)
@@ -184,7 +185,7 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info
        add_wait_queue(&current->sighand->signalfd_wqh, &wait);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
-               ret = dequeue_signal(current, &ctx->sigmask, info);
+               ret = dequeue_signal(current, &ctx->sigmask, info, &type);
                if (ret != 0)
                        break;
                if (signal_pending(current)) {
index 23505394ef7091d36219cd28b2e4ad617226f920..167995d471da74d65714aa22ecd835ef105a37ba 100644 (file)
@@ -286,17 +286,18 @@ static inline int signal_group_exit(const struct signal_struct *sig)
 extern void flush_signals(struct task_struct *);
 extern void ignore_signals(struct task_struct *);
 extern void flush_signal_handlers(struct task_struct *, int force_default);
-extern int dequeue_signal(struct task_struct *task,
-                         sigset_t *mask, kernel_siginfo_t *info);
+extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
+                         kernel_siginfo_t *info, enum pid_type *type);
 
 static inline int kernel_dequeue_signal(void)
 {
        struct task_struct *task = current;
        kernel_siginfo_t __info;
+       enum pid_type __type;
        int ret;
 
        spin_lock_irq(&task->sighand->siglock);
-       ret = dequeue_signal(task, &task->blocked, &__info);
+       ret = dequeue_signal(task, &task->blocked, &__info, &__type);
        spin_unlock_irq(&task->sighand->siglock);
 
        return ret;
index 986fa69c15c54b7586ecdac840d8f8216a0d031d..43e8b7e362b0e9223c5ec2f234b48681b831d25b 100644 (file)
@@ -626,7 +626,8 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
  *
  * All callers have to hold the siglock.
  */
-int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
+int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
+                  kernel_siginfo_t *info, enum pid_type *type)
 {
        bool resched_timer = false;
        int signr;
@@ -634,8 +635,10 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in
        /* We only dequeue private signals from ourselves, we don't let
         * signalfd steal them
         */
+       *type = PIDTYPE_PID;
        signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
        if (!signr) {
+               *type = PIDTYPE_TGID;
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info, &resched_timer);
 #ifdef CONFIG_POSIX_TIMERS
@@ -2522,7 +2525,7 @@ static void do_freezer_trap(void)
        freezable_schedule();
 }
 
-static int ptrace_signal(int signr, kernel_siginfo_t *info)
+static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
 {
        /*
         * We do not check sig_kernel_stop(signr) but set this marker
@@ -2563,7 +2566,7 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info)
 
        /* If the (new) signal is now blocked, requeue it.  */
        if (sigismember(&current->blocked, signr)) {
-               send_signal(signr, info, current, PIDTYPE_PID);
+               send_signal(signr, info, current, type);
                signr = 0;
        }
 
@@ -2664,6 +2667,7 @@ relock:
 
        for (;;) {
                struct k_sigaction *ka;
+               enum pid_type type;
 
                /* Has this task already been marked for death? */
                if (signal_group_exit(signal)) {
@@ -2706,16 +2710,18 @@ relock:
                 * so that the instruction pointer in the signal stack
                 * frame points to the faulting instruction.
                 */
+               type = PIDTYPE_PID;
                signr = dequeue_synchronous_signal(&ksig->info);
                if (!signr)
-                       signr = dequeue_signal(current, &current->blocked, &ksig->info);
+                       signr = dequeue_signal(current, &current->blocked,
+                                              &ksig->info, &type);
 
                if (!signr)
                        break; /* will return 0 */
 
                if (unlikely(current->ptrace) && (signr != SIGKILL) &&
                    !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) {
-                       signr = ptrace_signal(signr, &ksig->info);
+                       signr = ptrace_signal(signr, &ksig->info, type);
                        if (!signr)
                                continue;
                }
@@ -3540,6 +3546,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
        ktime_t *to = NULL, timeout = KTIME_MAX;
        struct task_struct *tsk = current;
        sigset_t mask = *which;
+       enum pid_type type;
        int sig, ret = 0;
 
        if (ts) {
@@ -3556,7 +3563,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
        signotset(&mask);
 
        spin_lock_irq(&tsk->sighand->siglock);
-       sig = dequeue_signal(tsk, &mask, info);
+       sig = dequeue_signal(tsk, &mask, info, &type);
        if (!sig && timeout) {
                /*
                 * None ready, temporarily unblock those we're interested
@@ -3575,7 +3582,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
                spin_lock_irq(&tsk->sighand->siglock);
                __set_task_blocked(tsk, &tsk->real_blocked);
                sigemptyset(&tsk->real_blocked);
-               sig = dequeue_signal(tsk, &mask, info);
+               sig = dequeue_signal(tsk, &mask, info, &type);
        }
        spin_unlock_irq(&tsk->sighand->siglock);