dma-direct: remove __dma_direct_free_pages
author		Christoph Hellwig <hch@lst.de>
		Tue, 29 Oct 2019 08:57:09 +0000 (09:57 +0100)
committer	Christoph Hellwig <hch@lst.de>
		Thu, 7 Nov 2019 16:25:40 +0000 (17:25 +0100)
We can just call dma_free_contiguous directly instead of wrapping it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Filippov <jcmvbkbc@gmail.com>
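
The removed helper was a one-line pass-through to dma_free_contiguous(), so each caller can invoke the CMA-aware free routine directly. A minimal before/after sketch, taken from the hunks below:

	/* Before: wrapper removed by this patch (kernel/dma/direct.c) */
	void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
	{
		dma_free_contiguous(dev, page, size);
	}

	/* After: call sites free the pages directly */
	dma_free_contiguous(dev, page, size);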
include/linux/dma-direct.h
kernel/dma/direct.c
kernel/dma/remap.c

diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index adf993a3bd58010de6db1c6e39c85ea2f085818d..dec3b3bb121da39e94367b0bf1a5db914c6507e0 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -68,6 +68,5 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs);
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
-void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
 int dma_direct_supported(struct device *dev, u64 mask);
 #endif /* _LINUX_DMA_DIRECT_H */
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 8402b29c280f560ae18420f9a99fc3f614c40293..a7a2739fb7475df03c7912dc5129fdd7f4f15b22 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -153,7 +153,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
                 * so log an error and fail.
                 */
                dev_info(dev, "Rejecting highmem page from CMA.\n");
-               __dma_direct_free_pages(dev, size, page);
+               dma_free_contiguous(dev, page, size);
                return NULL;
        }
 
@@ -175,11 +175,6 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
        return ret;
 }
 
-void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
-{
-       dma_free_contiguous(dev, page, size);
-}
-
 void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs)
 {
@@ -188,7 +183,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
            !force_dma_unencrypted(dev)) {
                /* cpu_addr is a struct page cookie, not a kernel address */
-               __dma_direct_free_pages(dev, size, cpu_addr);
+               dma_free_contiguous(dev, cpu_addr, size);
                return;
        }
 
@@ -198,7 +193,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
        if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
            dma_alloc_need_uncached(dev, attrs))
                cpu_addr = cached_kernel_address(cpu_addr);
-       __dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
+       dma_free_contiguous(dev, virt_to_page(cpu_addr), size);
 }
 
 void *dma_direct_alloc(struct device *dev, size_t size,
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index c00b9258fa6abaa77aecbd6a6f53a7e75dc00d93..fb1e50c2d48a19194c7f0c75a65506d1cfddceb2 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -238,7 +238,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                        dma_pgprot(dev, PAGE_KERNEL, attrs),
                        __builtin_return_address(0));
        if (!ret) {
-               __dma_direct_free_pages(dev, size, page);
+               dma_free_contiguous(dev, page, size);
                return ret;
        }
 
@@ -256,7 +256,7 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
                struct page *page = pfn_to_page(__phys_to_pfn(phys));
 
                vunmap(vaddr);
-               __dma_direct_free_pages(dev, size, page);
+               dma_free_contiguous(dev, page, size);
        }
 }