--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
** flush/purge and allocate "regular" cacheable pages for everything.
*/
-#define DMA_ERROR_CODE (~(dma_addr_t)0)
-
#ifdef CONFIG_PA11
extern const struct dma_map_ops pcxl_dma_ops;
extern const struct dma_map_ops pcx_dma_ops;
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
#define CMD_TLB_DIRECT_WRITE 35 /* IO_COMMAND for I/O TLB Writes */
#define CMD_TLB_PURGE 33 /* IO_COMMAND to Purge I/O TLB entry */
+#define CCIO_MAPPING_ERROR (~(dma_addr_t)0)
+
struct ioa_registers {
/* Runway Supervisory Set */
int32_t unused1[12];
BUG_ON(!dev);
ioc = GET_IOC(dev);
if (!ioc)
- return DMA_ERROR_CODE;
+ return CCIO_MAPPING_ERROR;
BUG_ON(size <= 0);
DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
}
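+/*
+ * mapping_error op: true when @dma_addr is the sentinel value that the
+ * CCIO mapping paths above return on failure.
+ */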
+static int ccio_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == CCIO_MAPPING_ERROR;
+}
+
static const struct dma_map_ops ccio_ops = {
.dma_supported = ccio_dma_supported,
.alloc = ccio_alloc,
.unmap_page = ccio_unmap_page,
.map_sg = ccio_map_sg,
.unmap_sg = ccio_unmap_sg,
+ .mapping_error = ccio_mapping_error,
};
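
With the .mapping_error op wired up, consumers keep calling the generic
dma_mapping_error() helper and the core routes the check to this driver.
A minimal sketch of a caller (dev, buf and size are hypothetical):

	dma_addr_t handle = dma_map_single(dev, buf, size, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;	/* ccio_map_single() returned CCIO_MAPPING_ERROR */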
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
#ifdef CONFIG_PROC_FS
#define DEFAULT_DMA_HINT_REG 0
+#define SBA_MAPPING_ERROR (~(dma_addr_t)0)
+
struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);
ioc = GET_IOC(dev);
if (!ioc)
- return DMA_ERROR_CODE;
+ return SBA_MAPPING_ERROR;
/* save offset bits */
offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
}
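+/*
+ * mapping_error op: true when @dma_addr is the sentinel value that the
+ * SBA mapping paths above return on failure.
+ */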
+static int sba_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == SBA_MAPPING_ERROR;
+}
+
static const struct dma_map_ops sba_ops = {
.dma_supported = sba_dma_supported,
.alloc = sba_alloc,
.unmap_page = sba_unmap_page,
.map_sg = sba_map_sg,
.unmap_sg = sba_unmap_sg,
+ .mapping_error = sba_mapping_error,
};
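
For reference, the generic helper of this era dispatched roughly as in the
sketch below (paraphrased from include/linux/dma-mapping.h; not part of
this patch):

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
	return 0;
}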