nvme-tcp: fix incorrect h2cdata pdu offset accounting
author Sagi Grimberg <sagi@grimberg.me>
Tue, 14 Sep 2021 15:38:55 +0000 (18:38 +0300)
committer Christoph Hellwig <hch@lst.de>
Tue, 21 Sep 2021 07:17:15 +0000 (09:17 +0200)
When the controller sends us multiple r2t PDUs for a single
request we need to account for them correctly, as our send/recv
contexts run concurrently (i.e. we may get a new r2t with r2t_offset
before we have updated our iterator and req->data_sent marker).
This can cause wrong offsets to be sent to the controller.

To fix that, first note that this can only happen in the send
sequence of the last page, so we take the r2t_offset from the
incoming r2t PDU as the h2c PDU data_offset, and in the
nvme_tcp_try_send_data loop we make sure to advance the request
markers also when we have completed a PDU but still expect more
r2t PDUs, as we have not yet sent the entire data of the request.

Fixes: db63c928047b ("nvme-tcp: fix possible use-after-completion")
Reported-by: Nowak, Lukasz <Lukasz.Nowak@Dell.com>
Tested-by: Nowak, Lukasz <Lukasz.Nowak@Dell.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/nvme/host/tcp.c

index e4249b7dc05682fad3893e4ec6f3ee182b1e22d2..3c1c29dd30207cd6fbf337a67aa72e8ef34cfddd 100644 (file)
@@ -620,7 +620,7 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
                cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
        data->ttag = pdu->ttag;
        data->command_id = nvme_cid(rq);
-       data->data_offset = cpu_to_le32(req->data_sent);
+       data->data_offset = pdu->r2t_offset;
        data->data_length = cpu_to_le32(req->pdu_len);
        return 0;
 }
@@ -953,7 +953,15 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
                        nvme_tcp_ddgst_update(queue->snd_hash, page,
                                        offset, ret);
 
-               /* fully successful last write*/
+               /*
+                * update the request iterator except for the last payload send
+                * in the request where we don't want to modify it as we may
+                * compete with the RX path completing the request.
+                */
+               if (req->data_sent + ret < req->data_len)
+                       nvme_tcp_advance_req(req, ret);
+
+               /* fully successful last send in current PDU */
                if (last && ret == len) {
                        if (queue->data_digest) {
                                nvme_tcp_ddgst_final(queue->snd_hash,
@@ -965,7 +973,6 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
                        }
                        return 1;
                }
-               nvme_tcp_advance_req(req, ret);
        }
        return -EAGAIN;
 }