dma-mapping: drop the dev argument to arch_sync_dma_for_*
author    Christoph Hellwig <hch@lst.de>
          Thu, 7 Nov 2019 17:03:11 +0000 (18:03 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 5 Apr 2023 09:16:43 +0000 (11:16 +0200)
[ Upstream commit 94cadc454f916e2988057d75009161acecdd09d5 ]

These are pure cache maintenance routines, so drop the unused
struct device argument.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Suggested-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Stable-dep-of: ab327f8acdf8 ("mips: bmips: BCM6358: disable RAC flush for TP1")
Signed-off-by: Sasha Levin <sashal@kernel.org>
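
For quick reference, the interface change applied in every file below is a simple
before/after of the prototypes in include/linux/dma-noncoherent.h (the _for_cpu
variant changes the same way, and arch_sync_dma_for_cpu_all() loses its argument
entirely):

    /* before: the struct device argument was accepted but never used */
    void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                    size_t size, enum dma_data_direction dir);

    /* after: pure cache maintenance on a physical range, no device needed */
    void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                    enum dma_data_direction dir);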
26 files changed:
arch/arc/mm/dma.c
arch/arm/mm/dma-mapping.c
arch/arm/xen/mm.c
arch/arm64/mm/dma-mapping.c
arch/c6x/mm/dma-coherent.c
arch/csky/mm/dma-mapping.c
arch/hexagon/kernel/dma.c
arch/ia64/mm/init.c
arch/m68k/kernel/dma.c
arch/microblaze/kernel/dma.c
arch/mips/bmips/dma.c
arch/mips/jazz/jazzdma.c
arch/mips/mm/dma-noncoherent.c
arch/nds32/kernel/dma.c
arch/nios2/mm/dma-mapping.c
arch/openrisc/kernel/dma.c
arch/parisc/kernel/pci-dma.c
arch/powerpc/mm/dma-noncoherent.c
arch/sh/kernel/dma-coherent.c
arch/sparc/kernel/ioport.c
arch/xtensa/kernel/pci-dma.c
drivers/iommu/dma-iommu.c
drivers/xen/swiotlb-xen.c
include/linux/dma-noncoherent.h
include/xen/swiotlb-xen.h
kernel/dma/direct.c

arch/arc/mm/dma.c
index 73a7e88a1e9264c042418fe3a7cc070bb4581682..e947572a521ec08557c1580992b67f1887f9db24 100644 (file)
@@ -48,8 +48,8 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
  * upper layer functions (in include/linux/dma-mapping.h)
  */
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        switch (dir) {
        case DMA_TO_DEVICE:
@@ -69,8 +69,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
        }
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        switch (dir) {
        case DMA_TO_DEVICE:
arch/arm/mm/dma-mapping.c
index 27576c7b836ee6ecf3cb210749bdad46f898ee38..fbfb9250e743affcbdb976f4329a6f7e00941ee9 100644 (file)
@@ -2332,15 +2332,15 @@ void arch_teardown_dma_ops(struct device *dev)
 }
 
 #ifdef CONFIG_SWIOTLB
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
                              size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
                              size, dir);
arch/arm/xen/mm.c
index 38fa917c8585c7e6b6e2efe75d785885f41260a2..a6a2514e5fe8fb0370b164128c5d6570ccd73109 100644 (file)
@@ -70,20 +70,20 @@ static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
  * pfn_valid returns true the pages is local and we can use the native
  * dma-direct functions, otherwise we call the Xen specific version.
  */
-void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
-               phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        if (pfn_valid(PFN_DOWN(handle)))
-               arch_sync_dma_for_cpu(dev, paddr, size, dir);
+               arch_sync_dma_for_cpu(paddr, size, dir);
        else if (dir != DMA_TO_DEVICE)
                dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
 }
 
-void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
-               phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        if (pfn_valid(PFN_DOWN(handle)))
-               arch_sync_dma_for_device(dev, paddr, size, dir);
+               arch_sync_dma_for_device(paddr, size, dir);
        else if (dir == DMA_FROM_DEVICE)
                dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
        else
arch/arm64/mm/dma-mapping.c
index 9239416e93d4e9a9787ea9d5801914c075c359a9..6c45350e33aa5a47ce0ea74aaed61c945c1a101f 100644 (file)
 
 #include <asm/cacheflush.h>
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        __dma_map_area(phys_to_virt(paddr), size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        __dma_unmap_area(phys_to_virt(paddr), size, dir);
 }
arch/c6x/mm/dma-coherent.c
index b319808e8f6bd3948790aee38711a3472d5b7565..a5909091cb14244f635d73a9b6ad76e3a03b6e19 100644 (file)
@@ -140,7 +140,7 @@ void __init coherent_mem_init(phys_addr_t start, u32 size)
                      sizeof(long));
 }
 
-static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
+static void c6x_dma_sync(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
 {
        BUG_ON(!valid_dma_direction(dir));
@@ -160,14 +160,14 @@ static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
        }
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
-       return c6x_dma_sync(dev, paddr, size, dir);
+       return c6x_dma_sync(paddr, size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
-       return c6x_dma_sync(dev, paddr, size, dir);
+       return c6x_dma_sync(paddr, size, dir);
 }
arch/csky/mm/dma-mapping.c
index 06e85b56545427de4f1883d95a59319f817c85d3..8f6571ae27c867ad350c1eb2779e4e65b5fbd0b3 100644 (file)
@@ -58,8 +58,8 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
        cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-                             size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        switch (dir) {
        case DMA_TO_DEVICE:
@@ -74,8 +74,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
        }
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-                          size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        switch (dir) {
        case DMA_TO_DEVICE:
arch/hexagon/kernel/dma.c
index f561b127c4b43caa9324805e9c0998491b06ef27..25f388d9cfcc36650454ecd55217bff5c6eac7da 100644 (file)
@@ -55,8 +55,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
        gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        void *addr = phys_to_virt(paddr);
 
arch/ia64/mm/init.c
index ee50506d86f426cbe1c30c58ef277449a59e159e..df6d3dfa9d820637a6be2f005d7f22ab0e7d1a9f 100644 (file)
@@ -73,8 +73,8 @@ __ia64_sync_icache_dcache (pte_t pte)
  * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
  * flush them when they get mapped into an executable vm-area.
  */
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        unsigned long pfn = PHYS_PFN(paddr);
 
arch/m68k/kernel/dma.c
index 3fab684cc0db0b3f67ef346f2e7a15989e2d0260..871a0e11da341ada53ba4b816f98b676a1bb6ea2 100644 (file)
@@ -61,8 +61,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 
 #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t handle, size_t size,
+               enum dma_data_direction dir)
 {
        switch (dir) {
        case DMA_BIDIRECTIONAL:
arch/microblaze/kernel/dma.c
index a89c2d4ed5ffc74dfa54cb864885d3596e9bbf6c..d7bebd04247b72b797185cca5494dd7ca8755fea 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/bug.h>
 #include <asm/cacheflush.h>
 
-static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
+static void __dma_sync(phys_addr_t paddr, size_t size,
                enum dma_data_direction direction)
 {
        switch (direction) {
@@ -31,14 +31,14 @@ static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
        }
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
-       __dma_sync(dev, paddr, size, dir);
+       __dma_sync(paddr, size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
-       __dma_sync(dev, paddr, size, dir);
+       __dma_sync(paddr, size, dir);
 }
arch/mips/bmips/dma.c
index 3d13c77c125f4a8b7fa7097446e7c550f43ecc19..df56bf4179e347b46a42bc41793a70f490c9811f 100644 (file)
@@ -64,7 +64,7 @@ phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
        return dma_addr;
 }
 
-void arch_sync_dma_for_cpu_all(struct device *dev)
+void arch_sync_dma_for_cpu_all(void)
 {
        void __iomem *cbr = BMIPS_GET_CBR();
        u32 cfg;
arch/mips/jazz/jazzdma.c
index a01e14955187e40140b7b065ca665e42ecb2b172..c64a297e82b3c33323d19fb769ba4450fde5f32d 100644 (file)
@@ -592,7 +592,7 @@ static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
        phys_addr_t phys = page_to_phys(page) + offset;
 
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               arch_sync_dma_for_device(dev, phys, size, dir);
+               arch_sync_dma_for_device(phys, size, dir);
        return vdma_alloc(phys, size);
 }
 
@@ -600,7 +600,7 @@ static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               arch_sync_dma_for_cpu(dev, vdma_log2phys(dma_addr), size, dir);
+               arch_sync_dma_for_cpu(vdma_log2phys(dma_addr), size, dir);
        vdma_free(dma_addr);
 }
 
@@ -612,7 +612,7 @@ static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 
        for_each_sg(sglist, sg, nents, i) {
                if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-                       arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
+                       arch_sync_dma_for_device(sg_phys(sg), sg->length,
                                dir);
                sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
                if (sg->dma_address == DMA_MAPPING_ERROR)
@@ -631,8 +631,7 @@ static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
        for_each_sg(sglist, sg, nents, i) {
                if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-                       arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length,
-                               dir);
+                       arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
                vdma_free(sg->dma_address);
        }
 }
@@ -640,13 +639,13 @@ static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 static void jazz_dma_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
 {
-       arch_sync_dma_for_device(dev, vdma_log2phys(addr), size, dir);
+       arch_sync_dma_for_device(vdma_log2phys(addr), size, dir);
 }
 
 static void jazz_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
 {
-       arch_sync_dma_for_cpu(dev, vdma_log2phys(addr), size, dir);
+       arch_sync_dma_for_cpu(vdma_log2phys(addr), size, dir);
 }
 
 static void jazz_dma_sync_sg_for_device(struct device *dev,
@@ -656,7 +655,7 @@ static void jazz_dma_sync_sg_for_device(struct device *dev,
        int i;
 
        for_each_sg(sgl, sg, nents, i)
-               arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+               arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
 }
 
 static void jazz_dma_sync_sg_for_cpu(struct device *dev,
@@ -666,7 +665,7 @@ static void jazz_dma_sync_sg_for_cpu(struct device *dev,
        int i;
 
        for_each_sg(sgl, sg, nents, i)
-               arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+               arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 }
 
 const struct dma_map_ops jazz_dma_ops = {
arch/mips/mm/dma-noncoherent.c
index 1d4d57dd9acf8ccff46780daa3107a16b1aad120..6cfacb04865fd82b5549ece832cf93335718d6e1 100644 (file)
@@ -27,7 +27,7 @@
  * R10000 and R12000 are used in such systems, the SGI IP28 Indigo² rsp.
  * SGI IP32 aka O2.
  */
-static inline bool cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(void)
 {
        switch (boot_cpu_type()) {
        case CPU_R10000:
@@ -118,17 +118,17 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
        } while (left);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        dma_sync_phys(paddr, size, dir);
 }
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
-       if (cpu_needs_post_dma_flush(dev))
+       if (cpu_needs_post_dma_flush())
                dma_sync_phys(paddr, size, dir);
 }
 #endif
arch/nds32/kernel/dma.c
index 4206d4b6c8cef40adfe7f59957e64012521fb35e..69d762182d49bfbd23ff57bc64e11b9328214519 100644 (file)
@@ -46,8 +46,8 @@ static inline void cache_op(phys_addr_t paddr, size_t size,
        } while (left);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        switch (dir) {
        case DMA_FROM_DEVICE:
@@ -61,8 +61,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
        }
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        switch (dir) {
        case DMA_TO_DEVICE:
arch/nios2/mm/dma-mapping.c
index 9cb238664584c6cef9b96809db406fed93beb2ec..0ed711e379020aa2adff898a50284734af5dcb8c 100644 (file)
@@ -18,8 +18,8 @@
 #include <linux/cache.h>
 #include <asm/cacheflush.h>
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        void *vaddr = phys_to_virt(paddr);
 
@@ -42,8 +42,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
        }
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        void *vaddr = phys_to_virt(paddr);
 
arch/openrisc/kernel/dma.c
index 4d5b8bd1d795684ae51a9eb7cb6aa7acb729dda2..adec711ad39d5bafdab35cc07a87758cca4d5eef 100644 (file)
@@ -125,7 +125,7 @@ arch_dma_free(struct device *dev, size_t size, void *vaddr,
        free_pages_exact(vaddr, size);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size,
+void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
                enum dma_data_direction dir)
 {
        unsigned long cl;
arch/parisc/kernel/pci-dma.c
index ca35d9a76e5062ea12e150310fdf3bda035e0893..a60d47fd4d55f7d598ce7033a93996ea3e385bc7 100644 (file)
@@ -439,14 +439,14 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
        free_pages((unsigned long)__va(dma_handle), order);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
 }
arch/powerpc/mm/dma-noncoherent.c
index 2a82984356f81ffd3407361a50f8773adb272215..5ab4f868e919b8dd8f1efe75ddfefc94383a68e2 100644 (file)
@@ -104,14 +104,14 @@ static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
 #endif
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        __dma_sync_page(paddr, size, dir);
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        __dma_sync_page(paddr, size, dir);
 }
arch/sh/kernel/dma-coherent.c
index b17514619b7e1f7de0d2df82efeaf0ac3b8144e8..eeb25a4fa55f24e91071403b87da8f6d014878ec 100644 (file)
@@ -25,7 +25,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
         * Pages from the page allocator may have data present in
         * cache. So flush the cache before using uncached memory.
         */
-       arch_sync_dma_for_device(dev, virt_to_phys(ret), size,
+       arch_sync_dma_for_device(virt_to_phys(ret), size,
                        DMA_BIDIRECTIONAL);
 
        ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
@@ -59,8 +59,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
        iounmap(vaddr);
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        void *addr = sh_cacheop_vaddr(phys_to_virt(paddr));
 
arch/sparc/kernel/ioport.c
index b87e0002131dd097725141114d52a10178eea171..9d723c58557b2eaa2f1800ca6ba9a3ad72744a74 100644 (file)
@@ -368,8 +368,8 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
 /* IIep is write-through, not flushing on cpu to device transfer. */
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        if (dir != PCI_DMA_TODEVICE)
                dma_make_coherent(paddr, PAGE_ALIGN(size));
arch/xtensa/kernel/pci-dma.c
index 154979d62b73c7e3e6aa54cc7497244a9e6375c2..2b86a2a042368c674eaf62b2d3a44e2019ba5f5a 100644 (file)
@@ -44,8 +44,8 @@ static void do_cache_op(phys_addr_t paddr, size_t size,
                }
 }
 
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        switch (dir) {
        case DMA_BIDIRECTIONAL:
@@ -62,8 +62,8 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
        }
 }
 
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
        switch (dir) {
        case DMA_BIDIRECTIONAL:
drivers/iommu/dma-iommu.c
index 4fc8fb92d45ef80b5d5b640274c711c70864d3b8..651054aa871034f3973d44584c0bc11c803aa9f7 100644 (file)
@@ -660,7 +660,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
                return;
 
        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-       arch_sync_dma_for_cpu(dev, phys, size, dir);
+       arch_sync_dma_for_cpu(phys, size, dir);
 }
 
 static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -672,7 +672,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
                return;
 
        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-       arch_sync_dma_for_device(dev, phys, size, dir);
+       arch_sync_dma_for_device(phys, size, dir);
 }
 
 static void iommu_dma_sync_sg_for_cpu(struct device *dev,
@@ -686,7 +686,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
                return;
 
        for_each_sg(sgl, sg, nelems, i)
-               arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+               arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 }
 
 static void iommu_dma_sync_sg_for_device(struct device *dev,
@@ -700,7 +700,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
                return;
 
        for_each_sg(sgl, sg, nelems, i)
-               arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+               arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
 }
 
 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -715,7 +715,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
        dma_handle =__iommu_dma_map(dev, phys, size, prot);
        if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            dma_handle != DMA_MAPPING_ERROR)
-               arch_sync_dma_for_device(dev, phys, size, dir);
+               arch_sync_dma_for_device(phys, size, dir);
        return dma_handle;
 }
 
drivers/xen/swiotlb-xen.c
index 06346422f743210cd296ee003be4da6c8709d890..486d7978ea970e499970ecef6fed69bf22875e48 100644 (file)
@@ -411,7 +411,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 
 done:
        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
+               xen_dma_sync_for_device(dev_addr, phys, size, dir);
        return dev_addr;
 }
 
@@ -431,7 +431,7 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
        BUG_ON(dir == DMA_NONE);
 
        if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);
+               xen_dma_sync_for_cpu(dev_addr, paddr, size, dir);
 
        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr))
@@ -445,7 +445,7 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
        phys_addr_t paddr = xen_bus_to_phys(dma_addr);
 
        if (!dev_is_dma_coherent(dev))
-               xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);
+               xen_dma_sync_for_cpu(dma_addr, paddr, size, dir);
 
        if (is_xen_swiotlb_buffer(dma_addr))
                swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
@@ -461,7 +461,7 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
                swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
 
        if (!dev_is_dma_coherent(dev))
-               xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
+               xen_dma_sync_for_device(dma_addr, paddr, size, dir);
 }
 
 /*
include/linux/dma-noncoherent.h
index dd3de6d88fc0814670f0f3396ec02e4be78da409..47d4830636627c35a36344eff4695963ac28629a 100644 (file)
@@ -75,29 +75,29 @@ static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
 #endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir);
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir);
 #else
-static inline void arch_sync_dma_for_device(struct device *dev,
-               phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
 }
 #endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-               size_t size, enum dma_data_direction dir);
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir);
 #else
-static inline void arch_sync_dma_for_cpu(struct device *dev,
-               phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir)
 {
 }
 #endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
 
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
-void arch_sync_dma_for_cpu_all(struct device *dev);
+void arch_sync_dma_for_cpu_all(void);
 #else
-static inline void arch_sync_dma_for_cpu_all(struct device *dev)
+static inline void arch_sync_dma_for_cpu_all(void)
 {
 }
 #endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
include/xen/swiotlb-xen.h
index d71380f6ed0b2c7570f37957baa24d453d825006..ffc0d3902b71735fb199bc69397f5514de1d088f 100644 (file)
@@ -4,10 +4,10 @@
 
 #include <linux/swiotlb.h>
 
-void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
-               phys_addr_t paddr, size_t size, enum dma_data_direction dir);
-void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
-               phys_addr_t paddr, size_t size, enum dma_data_direction dir);
+void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir);
+void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
+               enum dma_data_direction dir);
 
 extern int xen_swiotlb_init(int verbose, bool early);
 extern const struct dma_map_ops xen_swiotlb_dma_ops;
kernel/dma/direct.c
index f04cfc2e9e01a297fae331456bb5563544ba2bf3..4c21cdc15d1b8d84c416a84d138699fc9dbf9243 100644 (file)
@@ -232,7 +232,7 @@ void dma_direct_sync_single_for_device(struct device *dev,
                swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
 
        if (!dev_is_dma_coherent(dev))
-               arch_sync_dma_for_device(dev, paddr, size, dir);
+               arch_sync_dma_for_device(paddr, size, dir);
 }
 EXPORT_SYMBOL(dma_direct_sync_single_for_device);
 
@@ -250,7 +250,7 @@ void dma_direct_sync_sg_for_device(struct device *dev,
                                        dir, SYNC_FOR_DEVICE);
 
                if (!dev_is_dma_coherent(dev))
-                       arch_sync_dma_for_device(dev, paddr, sg->length,
+                       arch_sync_dma_for_device(paddr, sg->length,
                                        dir);
        }
 }
@@ -266,8 +266,8 @@ void dma_direct_sync_single_for_cpu(struct device *dev,
        phys_addr_t paddr = dma_to_phys(dev, addr);
 
        if (!dev_is_dma_coherent(dev)) {
-               arch_sync_dma_for_cpu(dev, paddr, size, dir);
-               arch_sync_dma_for_cpu_all(dev);
+               arch_sync_dma_for_cpu(paddr, size, dir);
+               arch_sync_dma_for_cpu_all();
        }
 
        if (unlikely(is_swiotlb_buffer(paddr)))
@@ -285,7 +285,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
                phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
 
                if (!dev_is_dma_coherent(dev))
-                       arch_sync_dma_for_cpu(dev, paddr, sg->length, dir);
+                       arch_sync_dma_for_cpu(paddr, sg->length, dir);
 
                if (unlikely(is_swiotlb_buffer(paddr)))
                        swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
@@ -293,7 +293,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
        }
 
        if (!dev_is_dma_coherent(dev))
-               arch_sync_dma_for_cpu_all(dev);
+               arch_sync_dma_for_cpu_all();
 }
 EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);
 
@@ -345,7 +345,7 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
        }
 
        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               arch_sync_dma_for_device(dev, phys, size, dir);
+               arch_sync_dma_for_device(phys, size, dir);
        return dma_addr;
 }
 EXPORT_SYMBOL(dma_direct_map_page);