* cache before the transfer is done, causing old data to be seen by
* the CPU.
*/
-#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN (128)
#ifndef __ASSEMBLY__
static inline int cache_line_size(void)
{
u32 cwg = cache_type_cwg();
- return cwg ? 4 << cwg : L1_CACHE_BYTES;
+ return cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
}
#endif /* __ASSEMBLY__ */
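
The fallback change above leaves the `4 << cwg` conversion intact: CTR_EL0.CWG reports log2 of the writeback granule in 4-byte words, so CWG == 4 means 64 bytes and CWG == 5 means 128. A minimal userspace sketch of that conversion (the helper name and main() harness are illustrative, not kernel code):

#include <stdio.h>

#define ARCH_DMA_MINALIGN (128)

/*
 * CWG holds log2(granule in words); words are 4 bytes, so the granule
 * is 4 << cwg bytes. CWG == 0 means the CPU does not report a granule,
 * so fall back to the compile-time worst case, exactly as the patched
 * cache_line_size() does above.
 */
static int cwg_to_bytes(unsigned int cwg)
{
	return cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
}

int main(void)
{
	printf("CWG 0 -> %d bytes (fallback)\n", cwg_to_bytes(0));
	printf("CWG 4 -> %d bytes\n", cwg_to_bytes(4));
	printf("CWG 5 -> %d bytes\n", cwg_to_bytes(5));
	return 0;
}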
void __init setup_cpu_features(void)
{
u32 cwg;
- int cls;
setup_system_capabilities();
mark_const_caps_ready();
	/*
	 * Check for sane CTR_EL0.CWG value.
*/
cwg = cache_type_cwg();
- cls = cache_line_size();
if (!cwg)
- pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
- cls);
- if (L1_CACHE_BYTES < cls)
- pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
- L1_CACHE_BYTES, cls);
+ pr_warn("No Cache Writeback Granule information, assuming %d\n",
+ ARCH_DMA_MINALIGN);
}
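
setup_cpu_features() no longer compares against L1_CACHE_BYTES because the compile-time constant is now fixed at 128; the runtime check moves into arm64_dma_init() below. For reference, cache_type_cwg() isolates the 4-bit CWG field at bits [27:24] of CTR_EL0; a hedged sketch of that extraction, taking the register value as a parameter so it builds outside the kernel (the in-kernel helper reads the register via MRS):

#include <stdint.h>

#define CTR_CWG_SHIFT	24
#define CTR_CWG_MASK	0xfUL

/*
 * Pull CTR_EL0.CWG (bits [27:24]) out of a previously read register
 * value. A result of 0 means the granule is not reported.
 */
static uint32_t cache_type_cwg(uint64_t ctr_el0)
{
	return (ctr_el0 >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
}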
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
swiotlb = 1;
+ WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
+ TAINT_CPU_OUT_OF_SPEC,
+ "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
+ ARCH_DMA_MINALIGN, cache_line_size());
+
return atomic_pool_init();
}
arch_initcall(arm64_dma_init);
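
The new WARN_TAINT() fires when a CPU reports a writeback granule larger than the 128 bytes now assumed at compile time, tainting the kernel as out of spec rather than silently risking corruption. The constant matters because kmalloc()'s minimum alignment follows ARCH_DMA_MINALIGN (via ARCH_KMALLOC_MINALIGN in <linux/slab.h>), which keeps DMA buffers from sharing a writeback granule with unrelated data. A userspace sketch of that invariant, using aligned_alloc() as a stand-in for kmalloc():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ARCH_DMA_MINALIGN (128)

int main(void)
{
	/*
	 * Stand-in for a driver's kmalloc(): every allocation starts on
	 * an ARCH_DMA_MINALIGN boundary, so a cache eviction triggered
	 * by a neighbouring object cannot overwrite freshly DMAed bytes.
	 */
	void *buf = aligned_alloc(ARCH_DMA_MINALIGN, 256);

	if (!buf)
		return 1;
	printf("offset within granule: %ju\n",
	       (uintmax_t)((uintptr_t)buf % ARCH_DMA_MINALIGN));
	free(buf);
	return 0;
}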