nvme: prevent warning triggered by nvme_stop_keep_alive
author     Nigel Kirkland <nigel.kirkland@broadcom.com>
           Tue, 11 Feb 2020 00:01:45 +0000 (16:01 -0800)
committer  Jens Axboe <axboe@kernel.dk>
           Fri, 14 Feb 2020 17:12:04 +0000 (10:12 -0700)
Delayed keep alive work is queued on the system workqueue and may be
cancelled via nvme_stop_keep_alive from nvme_reset_wq, nvme_fc_wq or
nvme_wq.

check_flush_dependency() detects mismatched attributes between the
workqueue context used to cancel the keep alive work and system_wq.
Specifically, system_wq does not have the WQ_MEM_RECLAIM flag, whereas
the contexts used to cancel the keep alive work do have it.

Example warning:

  workqueue: WQ_MEM_RECLAIM nvme-reset-wq:nvme_fc_reset_ctrl_work [nvme_fc]
is flushing !WQ_MEM_RECLAIM events:nvme_keep_alive_work [nvme_core]
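
For illustration only (a hypothetical demo module, not part of this patch;
all demo_* names are invented), the class of warning above can be
reproduced by flushing a delayed work item that lives on system_wq from a
worker running on a WQ_MEM_RECLAIM workqueue:

  #include <linux/module.h>
  #include <linux/delay.h>
  #include <linux/workqueue.h>

  static struct workqueue_struct *demo_reclaim_wq;
  static struct delayed_work demo_ka_work;
  static struct work_struct demo_cancel_work;

  /* Stand-in for the keep alive work; sleeps so it is still running
   * when the cancel below has to flush it. */
  static void demo_ka_fn(struct work_struct *work)
  {
          msleep(1000);
  }

  /* Runs on a WQ_MEM_RECLAIM workqueue, like nvme_reset_wq. Flushing
   * the still-running !WQ_MEM_RECLAIM work item makes
   * check_flush_dependency() emit the warning quoted above. */
  static void demo_cancel_fn(struct work_struct *work)
  {
          cancel_delayed_work_sync(&demo_ka_work);
  }

  static int __init demo_init(void)
  {
          demo_reclaim_wq = alloc_workqueue("demo_reclaim_wq",
                                            WQ_MEM_RECLAIM, 0);
          if (!demo_reclaim_wq)
                  return -ENOMEM;

          INIT_DELAYED_WORK(&demo_ka_work, demo_ka_fn);
          INIT_WORK(&demo_cancel_work, demo_cancel_fn);

          schedule_delayed_work(&demo_ka_work, 0); /* on system_wq */
          msleep(100);                             /* let it start  */
          queue_work(demo_reclaim_wq, &demo_cancel_work);
          return 0;
  }

  static void __exit demo_exit(void)
  {
          flush_work(&demo_cancel_work);
          destroy_workqueue(demo_reclaim_wq);
  }

  module_init(demo_init);
  module_exit(demo_exit);
  MODULE_LICENSE("GPL");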

To avoid the flags mismatch, delayed keep alive work is queued on nvme_wq.

However, this creates a secondary concern: a work item and the request to
cancel it may now live on the same workqueue. Specifically, err_work in
the rdma and tcp transports wants to flush/cancel the keep alive work,
which will now be on nvme_wq.

After reviewing the transports, err_work can be moved to nvme_reset_wq.
In fact, that aligns it better with the transports, which transition into
RESETTING and perform the related reset work on nvme_reset_wq.

Change nvme-rdma and nvme-tcp to perform err_work in nvme_reset_wq.
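
For reference, moving err_work to nvme_reset_wq is safe with respect to
the dependency check because both queues carry WQ_MEM_RECLAIM. Roughly,
nvme_core_init() in drivers/nvme/host/core.c allocates them as follows
(a sketch of the existing core code, not part of this diff):

  nvme_wq = alloc_workqueue("nvme-wq",
                  WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);

  nvme_reset_wq = alloc_workqueue("nvme-reset-wq",
                  WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);

So err_work running on nvme_reset_wq may flush/cancel the keep alive work
on nvme_wq without tripping check_flush_dependency().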

Signed-off-by: Nigel Kirkland <nigel.kirkland@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/nvme/host/core.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 5dc32b72e7faab7875640106514ab88d4976ebf9..7f05deada7f4e39ec2e4d2a9d41b02ea4a72891a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -66,8 +66,8 @@ MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
  * nvme_reset_wq - hosts nvme reset works
  * nvme_delete_wq - hosts nvme delete works
  *
- * nvme_wq will host works such are scan, aen handling, fw activation,
- * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
+ * nvme_wq will host works such as scan, aen handling, fw activation,
+ * keep-alive, periodic reconnects etc. nvme_reset_wq
  * runs reset works which also flush works hosted on nvme_wq for
  * serialization purposes. nvme_delete_wq host controller deletion
  * works which flush reset works for serialization.
@@ -976,7 +976,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
                startka = true;
        spin_unlock_irqrestore(&ctrl->lock, flags);
        if (startka)
-               schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+               queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
@@ -1006,7 +1006,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
                dev_dbg(ctrl->device,
                        "reschedule traffic based keep-alive timer\n");
                ctrl->comp_seen = false;
-               schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+               queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
                return;
        }
 
@@ -1023,7 +1023,7 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
        if (unlikely(ctrl->kato == 0))
                return;
 
-       schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+       queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 2a47c6c5007e1280a320f9776afe10005e23b98a..3e85c5cacefd25f742def056fc6d485443a5345f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1088,7 +1088,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
                return;
 
-       queue_work(nvme_wq, &ctrl->err_work);
+       queue_work(nvme_reset_wq, &ctrl->err_work);
 }
 
 static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index f8fa5c5b79f179fe7af5fdfe1a0c20535143342e..49d4373b84eb392531d7ce6f60099a41c042907d 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -422,7 +422,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
                return;
 
-       queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
+       queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
 }
 
 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,