#define writew_relaxed(x, addr) writew(x, addr)
#define writel_relaxed(x, addr) writel(x, addr)
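/*
 * For illustration (not part of the patch): on nios2 the _relaxed
 * accessors are simply aliased to the fully ordered ones, so a
 * relaxed write costs the same as a plain writel(). A minimal,
 * hypothetical driver call:
 *
 *	writel_relaxed(DMA_START, regs + DMA_CTRL_REG);
 *
 * DMA_START, regs and DMA_CTRL_REG are made-up names for this example.
 */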
-extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
- unsigned long cacheflag);
+void __iomem *ioremap(unsigned long physaddr, unsigned long size);
extern void __iounmap(void __iomem *addr);
-static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
-{
- return __ioremap(physaddr, size, 0);
-}
-
-static inline void __iomem *ioremap_nocache(unsigned long physaddr,
- unsigned long size)
-{
- return __ioremap(physaddr, size, 0);
-}
-
static inline void iounmap(void __iomem *addr)
{
__iounmap(addr);
}
-#define ioremap_nocache ioremap_nocache
-#define ioremap_wc ioremap_nocache
-#define ioremap_wt ioremap_nocache
+#define ioremap_nocache ioremap
+#define ioremap_wc ioremap
+#define ioremap_wt ioremap
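/*
 * For illustration (not part of the patch): after this change every
 * ioremap variant on nios2 resolves to the same uncached mapping, so
 * e.g. a hypothetical framebuffer driver asking for write-combining
 * gets a plain uncached mapping instead:
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_len);	/* same as ioremap() */
 *
 * fb, fb_phys and fb_len are made-up names for this example.
 */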
/* Pages to physical address... */
#define page_to_phys(page) virt_to_phys(page_to_virt(page))
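/*
 * For illustration (not part of the patch): page_to_phys() goes
 * page -> kernel virtual address -> physical address, which works
 * because nios2 keeps lowmem linearly mapped. A hypothetical caller
 * building a DMA address from a page:
 *
 *	dma_addr_t dst = page_to_phys(page) + offset_in_page(buf);
 *
 * page and buf are made-up names for this example.
 */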
/*
* Map some physical address range into the kernel address space.
*/
-void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
- unsigned long cacheflag)
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
{
	struct vm_struct *area;
	unsigned long offset;
	unsigned long last_addr;
	void *addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;
- /*
- * Map uncached objects in the low part of address space to
- * CONFIG_NIOS2_IO_REGION_BASE
- */
- if (IS_MAPPABLE_UNCACHEABLE(phys_addr) &&
- IS_MAPPABLE_UNCACHEABLE(last_addr) &&
- !(cacheflag & _PAGE_CACHED))
- return (void __iomem *)(CONFIG_NIOS2_IO_REGION_BASE + phys_addr);
-
/* Mappings have to be page-aligned */
offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
return NULL;
addr = area->addr;
- if (remap_area_pages((unsigned long) addr, phys_addr, size,
- cacheflag)) {
+ if (remap_area_pages((unsigned long) addr, phys_addr, size, 0)) {
vunmap(addr);
return NULL;
}
return (void __iomem *) (offset + (char *)addr);
}
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(ioremap);
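/*
 * For illustration (not part of the patch): with the cacheflag
 * argument gone, callers use the plain two-argument form. A minimal,
 * hypothetical probe path:
 *
 *	void __iomem *base = ioremap(res->start, resource_size(res));
 *
 *	if (!base)
 *		return -ENOMEM;
 *	writel(0, base + CTRL_REG);	/* CTRL_REG is a made-up offset */
 *	iounmap(base);
 *
 * res is assumed to be a struct resource owned by the example driver.
 */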
/*
* __iounmap unmaps nearly everything, so be careful