workqueue: fix state-dump console deadlock
author     Johan Hovold <johan@kernel.org>
           Wed, 6 Oct 2021 11:58:52 +0000 (13:58 +0200)
committer  Tejun Heo <tj@kernel.org>
           Mon, 11 Oct 2021 16:50:28 +0000 (06:50 -1000)
Console drivers often queue work while holding locks also taken in their
console write paths, something which can lead to deadlocks on SMP when
dumping workqueue state (e.g. sysrq-t or on suspend failures).

For serial console drivers this could look like:

CPU0                  CPU1
----                  ----

show_workqueue_state();
  lock(&pool->lock);
                      <IRQ>
                        lock(&port->lock);
                        schedule_work();
                          lock(&pool->lock);
  printk();
    lock(console_owner);
    lock(&port->lock);

where workqueues are, for example, used to push data to the line
discipline, process break signals and handle modem-status changes. Line
disciplines and serdev drivers can also queue work on write-wakeup
notifications, etc.
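
For illustration, a minimal sketch of such a driver follows; this is a
made-up example, not an in-tree driver, and demo_uart_port,
demo_uart_irq and ms_work are hypothetical names. The point is only
that the interrupt handler queues work while holding port->lock, so
port->lock ends up nesting outside pool->lock:

#include <linux/interrupt.h>
#include <linux/serial_core.h>
#include <linux/workqueue.h>

struct demo_uart_port {
	struct uart_port port;
	struct work_struct ms_work;	/* modem-status changes */
};

static irqreturn_t demo_uart_irq(int irq, void *dev_id)
{
	struct demo_uart_port *dp = dev_id;

	spin_lock(&dp->port.lock);
	/* schedule_work() internally takes the worker pool->lock. */
	schedule_work(&dp->ms_work);
	spin_unlock(&dp->port.lock);

	return IRQ_HANDLED;
}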

Reworking every console driver to avoid queuing work while holding locks
also taken in their write paths would complicate drivers and is neither
desirable nor feasible.

Instead use the deferred-printk mechanism to avoid printing while
holding pool locks when dumping workqueue state.

Note that there are a few WARN_ON() assertions in the workqueue code
which could potentially also trigger a deadlock. Hopefully the ongoing
printk rework will provide a general solution for this eventually.

This was originally reported after a lockdep splat when executing
sysrq-t with the imx serial driver.

Fixes: 3494fc30846d ("workqueue: dump workqueues on sysrq-t")
Cc: stable@vger.kernel.org # 4.0
Reported-by: Fabio Estevam <festevam@denx.de>
Tested-by: Fabio Estevam <festevam@denx.de>
Signed-off-by: Johan Hovold <johan@kernel.org>
Reviewed-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/workqueue.c

index 33a6b4a2443d274de3dfb1f31c11a460da118747..1b3eb1e9531f4ac71ebced9bac749ecea747064f 100644 (file)
@@ -4830,8 +4830,16 @@ void show_workqueue_state(void)
 
                for_each_pwq(pwq, wq) {
                        raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-                       if (pwq->nr_active || !list_empty(&pwq->inactive_works))
+                       if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+                               /*
+                                * Defer printing to avoid deadlocks in console
+                                * drivers that queue work while holding locks
+                                * also taken in their write paths.
+                                */
+                               printk_deferred_enter();
                                show_pwq(pwq);
+                               printk_deferred_exit();
+                       }
                        raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
                        /*
                         * We could be printing a lot from atomic context, e.g.
@@ -4849,7 +4857,12 @@ void show_workqueue_state(void)
                raw_spin_lock_irqsave(&pool->lock, flags);
                if (pool->nr_workers == pool->nr_idle)
                        goto next_pool;
-
+               /*
+                * Defer printing to avoid deadlocks in console drivers that
+                * queue work while holding locks also taken in their write
+                * paths.
+                */
+               printk_deferred_enter();
                pr_info("pool %d:", pool->id);
                pr_cont_pool_info(pool);
                pr_cont(" hung=%us workers=%d",
@@ -4864,6 +4877,7 @@ void show_workqueue_state(void)
                        first = false;
                }
                pr_cont("\n");
+               printk_deferred_exit();
        next_pool:
                raw_spin_unlock_irqrestore(&pool->lock, flags);
                /*