struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = cmd->nvmeq;
- /*
- * The aborted req will be completed on receiving the abort req.
- * We enable the timer again. If hit twice, it'll cause a device reset,
- * as the device then is in a faulty state.
- */
- int ret = BLK_EH_RESET_TIMER;
-
dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
nvmeq->qid);
-
spin_lock_irq(&nvmeq->q_lock);
- if (!nvmeq->dev->initialized) {
- /*
- * Force cancelled command frees the request, which requires we
- * return BLK_EH_NOT_HANDLED.
- */
- nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
- ret = BLK_EH_NOT_HANDLED;
- } else
- nvme_abort_req(req);
+ nvme_abort_req(req);
spin_unlock_irq(&nvmeq->q_lock);
- return ret;
+ /*
+ * The aborted request will be completed on receiving the abort request.
+ * Re-arm the timer; if it expires a second time, it triggers a device
+ * reset, as the device is then in a faulty state.
+ */
+ return BLK_EH_RESET_TIMER;
}
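
For reference, this is roughly how the timeout handler reads once the hunk above is applied (a sketch assembled from the lines shown here; the signature is assumed from the blk-mq timeout callback of that era and is not part of this diff):

static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
{
        struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = cmd->nvmeq;

        dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
                                                        nvmeq->qid);

        /* send the abort while holding the queue lock */
        spin_lock_irq(&nvmeq->q_lock);
        nvme_abort_req(req);
        spin_unlock_irq(&nvmeq->q_lock);

        /* re-arm the timer; a second expiry leads to a device reset */
        return BLK_EH_RESET_TIMER;
}
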
@@ ... @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
struct blk_mq_hw_ctx *hctx = nvmeq->hctx;
spin_lock_irq(&nvmeq->q_lock);
- nvme_process_cq(nvmeq);
if (hctx && hctx->tags)
blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq);
spin_unlock_irq(&nvmeq->q_lock);
}
if (!qid && dev->admin_q)
blk_mq_freeze_queue_start(dev->admin_q);
- nvme_clear_queue(nvmeq);
+
+ spin_lock_irq(&nvmeq->q_lock);
+ nvme_process_cq(nvmeq);
+ spin_unlock_irq(&nvmeq->q_lock);
}
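
Taken together, the two hunks above move the final completion-queue drain out of the cancel helper and into the queue-teardown path, so completions are reaped one last time right before the queue is torn down rather than on every cancel pass. Assuming the functions involved are nvme_clear_queue() and nvme_disable_queue() (their names are not visible in these hunks), they end up roughly as:

static void nvme_clear_queue(struct nvme_queue *nvmeq)
{
        struct blk_mq_hw_ctx *hctx = nvmeq->hctx;

        /* cancel whatever is still outstanding; no nvme_process_cq() here */
        spin_lock_irq(&nvmeq->q_lock);
        if (hctx && hctx->tags)
                blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq);
        spin_unlock_irq(&nvmeq->q_lock);
}

static void nvme_disable_queue(struct nvme_dev *dev, int qid)
{
        struct nvme_queue *nvmeq = dev->queues[qid];    /* assumed lookup */

        /* ... suspend the queue and delete it on the controller (elided) ... */

        if (!qid && dev->admin_q)
                blk_mq_freeze_queue_start(dev->admin_q);

        /* reap completions one last time before the queue goes away */
        spin_lock_irq(&nvmeq->q_lock);
        nvme_process_cq(nvmeq);
        spin_unlock_irq(&nvmeq->q_lock);
}
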
@@ ... @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
spin_lock(&dev_list_lock);
list_for_each_entry_safe(dev, next, &dev_list, node) {
int i;
- if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
- dev->initialized) {
+ if (readl(&dev->bar->csts) & NVME_CSTS_CFS) {
if (work_busy(&dev->reset_work))
continue;
list_del_init(&dev->node);
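
The hunk above drops the dev->initialized gate, so the polling kthread now schedules a reset for any controller reporting a fatal status (CSTS.CFS), whether or not initialization has completed. Roughly, with the reset scheduling assumed to follow as in the reset path at the end of this patch:

        if (readl(&dev->bar->csts) & NVME_CSTS_CFS) {
                if (work_busy(&dev->reset_work))
                        continue;       /* a reset is already in flight */
                list_del_init(&dev->node);
                /* assumed: queue the same failed-device reset handler used
                 * by the reset path below */
                dev->reset_workfn = nvme_reset_failed_dev;
                queue_work(nvme_workq, &dev->reset_work);
        }
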
static void nvme_del_queue_end(struct nvme_queue *nvmeq)
{
struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
-
- nvme_clear_queue(nvmeq);
nvme_put_dq(dq);
}
int i;
u32 csts = -1;
- dev->initialized = 0;
nvme_dev_list_remove(dev);
if (dev->bar) {
for (i = dev->queue_count - 1; i >= 0; i--) {
struct nvme_queue *nvmeq = dev->queues[i];
nvme_suspend_queue(nvmeq);
- nvme_clear_queue(nvmeq);
}
} else {
nvme_disable_io_queues(dev);
nvme_disable_queue(dev, 0);
}
nvme_dev_unmap(dev);
+
+ for (i = dev->queue_count - 1; i >= 0; i--)
+ nvme_clear_queue(dev->queues[i]);
}
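
The net effect of the hunk above, together with the nvme_del_queue_end() change, is that outstanding requests are now cancelled in one place, after the controller has been disabled and unmapped. In outline, assuming the function is nvme_dev_shutdown() and with the branch details elided:

static void nvme_dev_shutdown(struct nvme_dev *dev)
{
        int i;

        nvme_dev_list_remove(dev);

        /* ... suspend every queue directly, or delete the queues through
         * the controller, depending on its state (unchanged, elided) ... */

        nvme_dev_unmap(dev);

        /* cancel whatever is still outstanding, the admin queue last */
        for (i = dev->queue_count - 1; i >= 0; i--)
                nvme_clear_queue(dev->queues[i]);
}
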
@@ ... @@ static void nvme_dev_remove(struct nvme_dev *dev)
nvme_unfreeze_queues(dev);
nvme_set_irq_hints(dev);
}
- dev->initialized = 1;
return 0;
}
goto reset;
nvme_set_irq_hints(dev);
- dev->initialized = 1;
return;
reset:
- dev->reset_workfn = nvme_reset_failed_dev;
- queue_work(nvme_workq, &dev->reset_work);
+ if (!work_busy(&dev->reset_work)) {
+ dev->reset_workfn = nvme_reset_failed_dev;
+ queue_work(nvme_workq, &dev->reset_work);
+ }
}
@@ ... @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)