nvme: use blk_execute_rq() for passthrough commands
author     Keith Busch <kbusch@kernel.org>
           Thu, 10 Jun 2021 21:44:35 +0000 (14:44 -0700)
committer  Jens Axboe <axboe@kernel.dk>
           Wed, 30 Jun 2021 21:35:38 +0000 (15:35 -0600)
The generic blk_execute_rq() knows how to handle polled completions. Use
that instead of implementing an nvme-specific handler.

Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Link: https://lore.kernel.org/r/20210610214437.641245-3-kbusch@kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
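
For context, the polling loop removed from drivers/nvme/host/core.c below does not simply disappear: an earlier patch in this series teaches the generic blk_execute_rq() to recognize a request allocated on a polled hardware context and busy-poll for its completion itself. The sketch that follows is a paraphrase of the deleted nvme_execute_rq_polled() loop, not verbatim block-layer code, and the helper name is purely illustrative:

/*
 * Sketch only: roughly how a synchronous submitter waits for a passthrough
 * request on a HCTX_TYPE_POLL hardware queue.  Paraphrased from the
 * nvme_execute_rq_polled() body removed below; example_poll_for_completion()
 * is a made-up name, not a real block-layer API.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/completion.h>
#include <linux/sched.h>

static void example_poll_for_completion(struct request *rq,
					struct completion *wait)
{
	while (!completion_done(wait)) {
		/* Reap completions from the polled queue, then yield. */
		blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), true);
		cond_resched();
	}
}

With that loop living in the block layer, the driver only needs to tag polled passthrough requests (the REQ_HIPRI hunk in nvme_init_request() below) and call plain blk_execute_rq(); the "poll" argument threaded through __nvme_submit_sync_cmd() and nvmf_connect_io_queue() becomes redundant and is dropped throughout.
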
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/loop.c

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 80c656dcbbacfebf8c68c1840bd42cd3c94ee257..e0d3f0aa25da6234637b9882c2939fd01d096016 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -631,6 +631,8 @@ static inline void nvme_init_request(struct request *req,
        cmd->common.flags &= ~NVME_CMD_SGL_ALL;
 
        req->cmd_flags |= REQ_FAILFAST_DRIVER;
+       if (req->mq_hctx->type == HCTX_TYPE_POLL)
+               req->cmd_flags |= REQ_HIPRI;
        nvme_clear_nvme_request(req);
        memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
 }
@@ -1029,31 +1031,6 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
 }
 EXPORT_SYMBOL_GPL(nvme_setup_cmd);
 
-static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
-{
-       struct completion *waiting = rq->end_io_data;
-
-       rq->end_io_data = NULL;
-       complete(waiting);
-}
-
-static void nvme_execute_rq_polled(struct request_queue *q,
-               struct gendisk *bd_disk, struct request *rq, int at_head)
-{
-       DECLARE_COMPLETION_ONSTACK(wait);
-
-       WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));
-
-       rq->cmd_flags |= REQ_HIPRI;
-       rq->end_io_data = &wait;
-       blk_execute_rq_nowait(bd_disk, rq, at_head, nvme_end_sync_rq);
-
-       while (!completion_done(&wait)) {
-               blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
-               cond_resched();
-       }
-}
-
 /*
  * Returns 0 on success.  If the result is negative, it's a Linux error code;
  * if the result is positive, it's an NVM Express status code
@@ -1061,7 +1038,7 @@ static void nvme_execute_rq_polled(struct request_queue *q,
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                union nvme_result *result, void *buffer, unsigned bufflen,
                unsigned timeout, int qid, int at_head,
-               blk_mq_req_flags_t flags, bool poll)
+               blk_mq_req_flags_t flags)
 {
        struct request *req;
        int ret;
@@ -1082,10 +1059,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                        goto out;
        }
 
-       if (poll)
-               nvme_execute_rq_polled(req->q, NULL, req, at_head);
-       else
-               blk_execute_rq(NULL, req, at_head);
+       blk_execute_rq(NULL, req, at_head);
        if (result)
                *result = nvme_req(req)->result;
        if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
@@ -1102,7 +1076,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buffer, unsigned bufflen)
 {
        return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
-                       NVME_QID_ANY, 0, 0, false);
+                       NVME_QID_ANY, 0, 0);
 }
 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
 
@@ -1465,7 +1439,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
        c.features.dword11 = cpu_to_le32(dword11);
 
        ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
-                       buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
+                       buffer, buflen, 0, NVME_QID_ANY, 0, 0);
        if (ret >= 0 && result)
                *result = le32_to_cpu(res.u32);
        return ret;
@@ -2047,7 +2021,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
        cmd.common.cdw11 = cpu_to_le32(len);
 
        return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
-                       NVME_QID_ANY, 1, 0, false);
+                       NVME_QID_ANY, 1, 0);
 }
 EXPORT_SYMBOL_GPL(nvme_sec_submit);
 #endif /* CONFIG_BLK_SED_OPAL */
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 1e6a7cc056cafb6c7f19414224913ad4a3da87e6..a5469fd9d4c31fa15e4ea43f292ce74eeff97b7a 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -154,7 +154,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
        cmd.prop_get.offset = cpu_to_le32(off);
 
        ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
-                       NVME_QID_ANY, 0, 0, false);
+                       NVME_QID_ANY, 0, 0);
 
        if (ret >= 0)
                *val = le64_to_cpu(res.u64);
@@ -200,7 +200,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
        cmd.prop_get.offset = cpu_to_le32(off);
 
        ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
-                       NVME_QID_ANY, 0, 0, false);
+                       NVME_QID_ANY, 0, 0);
 
        if (ret >= 0)
                *val = le64_to_cpu(res.u64);
@@ -245,7 +245,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
        cmd.prop_set.value = cpu_to_le64(val);
 
        ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
-                       NVME_QID_ANY, 0, 0, false);
+                       NVME_QID_ANY, 0, 0);
        if (unlikely(ret))
                dev_err(ctrl->device,
                        "Property Set error: %d, offset %#x\n",
@@ -391,7 +391,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 
        ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
                        data, sizeof(*data), 0, NVME_QID_ANY, 1,
-                       BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
+                       BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
        if (ret) {
                nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
                                       &cmd, data);
@@ -415,7 +415,6 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
  * @qid:       NVMe I/O queue number for the new I/O connection between
  *             host and target (note qid == 0 is illegal as this is
  *             the Admin queue, per NVMe standard).
- * @poll:      Whether or not to poll for the completion of the connect cmd.
  *
  * This function issues a fabrics-protocol connection
  * of a NVMe I/O queue (via NVMe Fabrics "Connect" command)
@@ -427,7 +426,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
  *     > 0: NVMe error status code
  *     < 0: Linux errno error code
  */
-int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 {
        struct nvme_command cmd = { };
        struct nvmf_connect_data *data;
@@ -453,7 +452,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
 
        ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
                        data, sizeof(*data), 0, qid, 1,
-                       BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, poll);
+                       BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
        if (ret) {
                nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
                                       &cmd, data);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index c31dad69a7738462cf8cc8b071d9b9d9f3eba9fe..a146cb903869cb7412b082e5973e20e9783ec0f4 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -182,7 +182,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
 int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
 int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
 int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
-int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll);
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
 int nvmf_register_transport(struct nvmf_transport_ops *ops);
 void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 7600863f7752b1f886420763140035b096bce4bb..7f462af1b02afd1a45806eebcb4535d6ae066830 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2346,7 +2346,7 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
                                        (qsize / 5));
                if (ret)
                        break;
-               ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
+               ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                if (ret)
                        break;
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 75420ceacc10441a895629a12c4fe54f7a1ca1f2..3b12ad78ee7a6abad3f2fec81ee3115c6bd9d6b5 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -658,7 +658,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                union nvme_result *result, void *buffer, unsigned bufflen,
                unsigned timeout, int qid, int at_head,
-               blk_mq_req_flags_t flags, bool poll);
+               blk_mq_req_flags_t flags);
 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
                      unsigned int dword11, void *buffer, size_t buflen,
                      u32 *result);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index a9e70cefd7eda65c2e8c23c81cfa0ef00ba9afe4..7f6b3a9915014d1c79d039a7cc60fe779d65f34e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -680,11 +680,10 @@ static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl)
 static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
 {
        struct nvme_rdma_queue *queue = &ctrl->queues[idx];
-       bool poll = nvme_rdma_poll_queue(queue);
        int ret;
 
        if (idx)
-               ret = nvmf_connect_io_queue(&ctrl->ctrl, idx, poll);
+               ret = nvmf_connect_io_queue(&ctrl->ctrl, idx);
        else
                ret = nvmf_connect_admin_queue(&ctrl->ctrl);
 
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index c7bd37103cf402d7ffaed77c6f9f2cd3d3a0509d..12acfe05cd68fa1f8c8e91da0b6cb033e3b9219c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1574,7 +1574,7 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
        int ret;
 
        if (idx)
-               ret = nvmf_connect_io_queue(nctrl, idx, false);
+               ret = nvmf_connect_io_queue(nctrl, idx);
        else
                ret = nvmf_connect_admin_queue(nctrl);
 
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index a5c4a186502639cd6daec64ff1f02a3ab5954f0c..3a17a7e26bbfc060a88503b3865ce1de6341a685 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -337,7 +337,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
        int i, ret;
 
        for (i = 1; i < ctrl->ctrl.queue_count; i++) {
-               ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
+               ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
                if (ret)
                        return ret;
                set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);