iommu/dma: Refactor iommu_dma_get_sgtable
author	Christoph Hellwig <hch@lst.de>
Mon, 20 May 2019 07:29:43 +0000 (09:29 +0200)
committer	Joerg Roedel <jroedel@suse.de>
Mon, 27 May 2019 15:31:11 +0000 (17:31 +0200)
Inline __iommu_dma_get_sgtable_page into the main function, and use the
fact that __iommu_dma_get_pages returns NULL for remapped contiguous
allocations to simplify the code flow a bit.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
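
For context (a sketch, not part of this commit): __iommu_dma_get_pages only
hands back a page array for allocations that were remapped from discontiguous
pages; for everything else it returns NULL, which is what lets the
contiguous/remapped case fall through to the single-page path. Roughly how
that helper reads elsewhere in dma-iommu.c around this series (simplified,
not the verbatim kernel source):

	static struct page **__iommu_dma_get_pages(void *cpu_addr)
	{
		struct vm_struct *area = find_vm_area(cpu_addr);

		/*
		 * No vmalloc area, or an area without a page array (e.g. a
		 * DMA_ATTR_FORCE_CONTIGUOUS buffer remapped as one block):
		 * return NULL so the caller builds a single-entry sg_table.
		 */
		if (!area || !area->pages)
			return NULL;
		return area->pages;
	}

Given that behaviour, the refactored iommu_dma_get_sgtable below has only two
outcomes: a multi-entry table built with sg_alloc_table_from_pages() when a
page array exists, and a single-entry table built with sg_alloc_table() plus
sg_set_page() for linear and contiguous-remapped buffers.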
drivers/iommu/dma-iommu.c

index 84761adbb1d49745ba21a1a7b228e62c5cd29d5c..fa95794868a45498bef3238fd0c0bde2062a4d08 100644 (file)
@@ -1097,42 +1097,31 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
        return __iommu_dma_mmap(pages, size, vma);
 }
 
-static int __iommu_dma_get_sgtable_page(struct sg_table *sgt, struct page *page,
-               size_t size)
-{
-       int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
-
-       if (!ret)
-               sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
-       return ret;
-}
-
 static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
 {
-       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       struct page **pages;
+       struct page *page;
+       int ret;
 
-       if (!is_vmalloc_addr(cpu_addr)) {
-               struct page *page = virt_to_page(cpu_addr);
-               return __iommu_dma_get_sgtable_page(sgt, page, size);
-       }
+       if (is_vmalloc_addr(cpu_addr)) {
+               struct page **pages = __iommu_dma_get_pages(cpu_addr);
 
-       if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
-               /*
-                * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
-                * hence in the vmalloc space.
-                */
-               struct page *page = vmalloc_to_page(cpu_addr);
-               return __iommu_dma_get_sgtable_page(sgt, page, size);
+               if (pages) {
+                       return sg_alloc_table_from_pages(sgt, pages,
+                                       PAGE_ALIGN(size) >> PAGE_SHIFT,
+                                       0, size, GFP_KERNEL);
+               }
+
+               page = vmalloc_to_page(cpu_addr);
+       } else {
+               page = virt_to_page(cpu_addr);
        }
 
-       pages = __iommu_dma_get_pages(cpu_addr);
-       if (!pages)
-               return -ENXIO;
-       return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
-                                        GFP_KERNEL);
+       ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+       if (!ret)
+               sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+       return ret;
 }
 
 static const struct dma_map_ops iommu_dma_ops = {