nvme-pci: refactor nvme_unmap_data
author    Christoph Hellwig <hch@lst.de>
          Wed, 20 Jan 2021 08:33:52 +0000 (09:33 +0100)
committer Christoph Hellwig <hch@lst.de>
          Wed, 20 Jan 2021 17:56:26 +0000 (18:56 +0100)
Split out three helpers from nvme_unmap_data (nvme_free_prps, nvme_free_sgls
and nvme_unmap_sg) that will allow finer-grained unwinding from nvme_map_data.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Marc Orr <marcorr@google.com>
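
For context, a rough sketch of the finer-grained unwinding these helpers
enable in nvme_map_data (illustrative only, not part of this patch; the
labels and the elided setup are assumptions about the follow-on change):

    /* Illustrative sketch -- error unwinding with the new helpers. */
    static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                    struct nvme_command *cmnd)
    {
            struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
            blk_status_t ret = BLK_STS_RESOURCE;

            /* ... single-segment fast paths and sg mapping elided ... */

            ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
            if (ret != BLK_STS_OK)
                    goto out_unmap_sg;      /* undo only what was set up */
            return BLK_STS_OK;

    out_unmap_sg:
            nvme_unmap_sg(dev, req);        /* unmap the SG list, leave pools alone */
            mempool_free(iod->sg, dev->iod_mempool);
            return ret;
    }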
drivers/nvme/host/pci.c

index 25456d02eddb8cf4af4cda4026ae146be877af9d..e29ece9e4d4b8eb1d7e80c0e3aa82be97eef6dbf 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -543,50 +543,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
        return true;
 }
 
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
 {
-       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
-       dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       dma_addr_t dma_addr = iod->first_dma;
        int i;
 
-       if (iod->dma_len) {
-               dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
-                              rq_dma_dir(req));
-               return;
+       for (i = 0; i < iod->npages; i++) {
+               __le64 *prp_list = nvme_pci_iod_list(req)[i];
+               dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+               dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
+               dma_addr = next_dma_addr;
        }
 
-       WARN_ON_ONCE(!iod->nents);
+}
 
-       if (is_pci_p2pdma_page(sg_page(iod->sg)))
-               pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
-                                   rq_dma_dir(req));
-       else
-               dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
+{
+       const int last_sg = SGES_PER_PAGE - 1;
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       dma_addr_t dma_addr = iod->first_dma;
+       int i;
 
+       for (i = 0; i < iod->npages; i++) {
+               struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
+               dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
 
-       if (iod->npages == 0)
-               dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
-                       dma_addr);
+               dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
+               dma_addr = next_dma_addr;
+       }
 
-       for (i = 0; i < iod->npages; i++) {
-               void *addr = nvme_pci_iod_list(req)[i];
+}
 
-               if (iod->use_sgl) {
-                       struct nvme_sgl_desc *sg_list = addr;
+static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
+{
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-                       next_dma_addr =
-                           le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
-               } else {
-                       __le64 *prp_list = addr;
+       if (is_pci_p2pdma_page(sg_page(iod->sg)))
+               pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
+                                   rq_dma_dir(req));
+       else
+               dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+}
 
-                       next_dma_addr = le64_to_cpu(prp_list[last_prp]);
-               }
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+{
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-               dma_pool_free(dev->prp_page_pool, addr, dma_addr);
-               dma_addr = next_dma_addr;
+       if (iod->dma_len) {
+               dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
+                              rq_dma_dir(req));
+               return;
        }
 
+       WARN_ON_ONCE(!iod->nents);
+
+       nvme_unmap_sg(dev, req);
+       if (iod->npages == 0)
+               dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+                             iod->first_dma);
+       else if (iod->use_sgl)
+               nvme_free_sgls(dev, req);
+       else
+               nvme_free_prps(dev, req);
        mempool_free(iod->sg, dev->iod_mempool);
 }
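
Both free helpers rely on the chained layout of the descriptor pages: the
last slot of each pool page holds the DMA address of the next page, so the
loop must read the next pointer before freeing the current page. A toy
userspace sketch of that walk-and-free order (plain pointers and malloc/free
standing in for DMA addresses and dma_pool_free; the sizes are made up):

    #include <stdint.h>
    #include <stdlib.h>

    #define ENTRIES_PER_PAGE 4  /* stand-in for NVME_CTRL_PAGE_SIZE / sizeof(__le64) */

    int main(void)
    {
            const int last = ENTRIES_PER_PAGE - 1;
            enum { NPAGES = 3 };
            uint64_t *pages[NPAGES], *page, *next;
            int i;

            /* Build the chain: the last slot of each page points at the next. */
            for (i = 0; i < NPAGES; i++)
                    pages[i] = calloc(ENTRIES_PER_PAGE, sizeof(uint64_t));
            for (i = 0; i < NPAGES - 1; i++)
                    pages[i][last] = (uint64_t)(uintptr_t)pages[i + 1];

            /* Walk and free: fetch the next pointer before freeing the page,
             * the same order nvme_free_prps()/nvme_free_sgls() use. */
            page = pages[0];
            for (i = 0; i < NPAGES; i++) {
                    next = (uint64_t *)(uintptr_t)page[last];
                    free(page);
                    page = next;
            }
            return 0;
    }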