dma-mapping: merge the generic remapping helpers into dma-direct
Author:     Christoph Hellwig <hch@lst.de>
AuthorDate: Tue, 29 Oct 2019 10:06:32 +0000 (11:06 +0100)
Commit:     Christoph Hellwig <hch@lst.de>
CommitDate: Mon, 11 Nov 2019 09:52:18 +0000 (10:52 +0100)
Integrate the generic dma remapping implementation into the main flow.
This prepares for architectures like xtensa that use an uncached
segment for pages in the kernel mapping, but can also remap highmem
from CMA.  To simplify that implementation we now always derive the
page from the DMA address (by way of the physical address) instead of
from the virtual address.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Filippov <jcmvbkbc@gmail.com>
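
For reference, dma_direct_to_page(), used in the new free path below to
recover the struct page from the DMA handle, amounts to a DMA-to-physical
translation followed by a PFN lookup.  A minimal sketch, assuming the
helper keeps this shape in the dma-direct code (illustrative, not the
verbatim upstream body):

    static inline struct page *dma_direct_to_page(struct device *dev,
    		dma_addr_t dma_addr)
    {
    	/* DMA address -> CPU physical address -> PFN -> struct page */
    	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
    }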
kernel/dma/direct.c
kernel/dma/remap.c

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 58beaa9ddd27bf5891046837fa817570de05324f..22a2e08338629c29f340f11c05e87092c5373a67 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -12,6 +12,7 @@
 #include <linux/dma-contiguous.h>
 #include <linux/dma-noncoherent.h>
 #include <linux/pfn.h>
+#include <linux/vmalloc.h>
 #include <linux/set_memory.h>
 #include <linux/swiotlb.h>
 
@@ -137,6 +138,15 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
        struct page *page;
        void *ret;
 
+       if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+           dma_alloc_need_uncached(dev, attrs) &&
+           !gfpflags_allow_blocking(gfp)) {
+               ret = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+               if (!ret)
+                       return NULL;
+               goto done;
+       }
+
        page = __dma_direct_alloc_pages(dev, size, gfp, attrs);
        if (!page)
                return NULL;
@@ -146,9 +156,28 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
                /* remove any dirty cache lines on the kernel alias */
                if (!PageHighMem(page))
                        arch_dma_prep_coherent(page, size);
-               *dma_handle = phys_to_dma(dev, page_to_phys(page));
                /* return the page pointer as the opaque cookie */
-               return page;
+               ret = page;
+               goto done;
+       }
+
+       if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+            dma_alloc_need_uncached(dev, attrs)) ||
+           (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
+               /* remove any dirty cache lines on the kernel alias */
+               arch_dma_prep_coherent(page, PAGE_ALIGN(size));
+
+               /* create a coherent mapping */
+               ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
+                               dma_pgprot(dev, PAGE_KERNEL, attrs),
+                               __builtin_return_address(0));
+               if (!ret) {
+                       dma_free_contiguous(dev, page, size);
+                       return ret;
+               }
+
+               memset(ret, 0, size);
+               goto done;
        }
 
        if (PageHighMem(page)) {
@@ -164,12 +193,9 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
        }
 
        ret = page_address(page);
-       if (force_dma_unencrypted(dev)) {
+       if (force_dma_unencrypted(dev))
                set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
-               *dma_handle = __phys_to_dma(dev, page_to_phys(page));
-       } else {
-               *dma_handle = phys_to_dma(dev, page_to_phys(page));
-       }
+
        memset(ret, 0, size);
 
        if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
@@ -177,7 +203,11 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
            dma_alloc_need_uncached(dev, attrs)) {
                arch_dma_prep_coherent(page, size);
                ret = uncached_kernel_address(ret);
        }
-
+done:
+       if (force_dma_unencrypted(dev))
+               *dma_handle = __phys_to_dma(dev, page_to_phys(page));
+       else
+               *dma_handle = phys_to_dma(dev, page_to_phys(page));
        return ret;
 }
 
@@ -193,19 +223,24 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
                return;
        }
 
+       if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+           dma_free_from_pool(cpu_addr, PAGE_ALIGN(size)))
+               return;
+
        if (force_dma_unencrypted(dev))
                set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
 
-       if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
-           dma_alloc_need_uncached(dev, attrs))
-               cpu_addr = cached_kernel_address(cpu_addr);
-       dma_free_contiguous(dev, virt_to_page(cpu_addr), size);
+       if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
+               vunmap(cpu_addr);
+
+       dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
 }
 
 void *dma_direct_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
        if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+           !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            dma_alloc_need_uncached(dev, attrs))
                return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
        return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
@@ -215,6 +250,7 @@ void dma_direct_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
        if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+           !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
            dma_alloc_need_uncached(dev, attrs))
                arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
        else
diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
index 3c49499ee6b08f9aa2c3328de73692ec03415b5a..d47bd40fc0f50b712d76c1a0f6cc446d4c5cabf9 100644
--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c
@@ -210,53 +210,4 @@ bool dma_free_from_pool(void *start, size_t size)
        gen_pool_free(atomic_pool, (unsigned long)start, size);
        return true;
 }
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-               gfp_t flags, unsigned long attrs)
-{
-       struct page *page = NULL;
-       void *ret;
-
-       size = PAGE_ALIGN(size);
-
-       if (!gfpflags_allow_blocking(flags)) {
-               ret = dma_alloc_from_pool(size, &page, flags);
-               if (!ret)
-                       return NULL;
-               goto done;
-       }
-
-       page = __dma_direct_alloc_pages(dev, size, flags, attrs);
-       if (!page)
-               return NULL;
-
-       /* remove any dirty cache lines on the kernel alias */
-       arch_dma_prep_coherent(page, size);
-
-       /* create a coherent mapping */
-       ret = dma_common_contiguous_remap(page, size,
-                       dma_pgprot(dev, PAGE_KERNEL, attrs),
-                       __builtin_return_address(0));
-       if (!ret) {
-               dma_free_contiguous(dev, page, size);
-               return ret;
-       }
-
-       memset(ret, 0, size);
-done:
-       *dma_handle = phys_to_dma(dev, page_to_phys(page));
-       return ret;
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_handle, unsigned long attrs)
-{
-       if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) {
-               phys_addr_t phys = dma_to_phys(dev, dma_handle);
-               struct page *page = pfn_to_page(__phys_to_pfn(phys));
-
-               vunmap(vaddr);
-               dma_free_contiguous(dev, page, size);
-       }
-}
 #endif /* CONFIG_DMA_DIRECT_REMAP */
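
From a driver's point of view none of this is visible: the atomic-pool,
vmap-remap, uncached-segment and plain lowmem branches above are all
reached through the same coherent allocation API.  A hypothetical usage
sketch (mydrv_setup_buffer() is made up for illustration):

    #include <linux/dma-mapping.h>

    static int mydrv_setup_buffer(struct device *dev)
    {
    	dma_addr_t dma_handle;
    	void *cpu_addr;

    	/*
    	 * On a CONFIG_DMA_DIRECT_REMAP kernel, a non-blocking caller
    	 * (e.g. GFP_ATOMIC) with a non-coherent device would take the
    	 * dma_alloc_from_pool() fast path added above instead.
    	 */
    	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle,
    				      GFP_KERNEL);
    	if (!cpu_addr)
    		return -ENOMEM;

    	/* ... program dma_handle into the device, use cpu_addr ... */

    	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
    	return 0;
    }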