u32 mask;
u32 value;
- direction =
- (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE;
+ if (bit & (CFG_LRQ | CFG_LR | CFG_LCQ))
+ direction = DMA_TO_DEVICE;
+ else
+ direction = DMA_FROM_DEVICE;
- map = pci_map_single(qdev->pdev, ptr, size, direction);
- if (pci_dma_mapping_error(qdev->pdev, map)) {
+ map = dma_map_single(&qdev->pdev->dev, ptr, size, direction);
+ if (dma_mapping_error(&qdev->pdev->dev, map)) {
netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
return -ENOMEM;
}
status = ql_wait_cfg(qdev, bit);
exit:
ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
- pci_unmap_single(qdev->pdev, map, size, direction);
+ dma_unmap_single(&qdev->pdev->dev, map, size, direction);
return status;
}
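For reference, the streaming-DMA pattern that replaces the pci_* wrappers throughout this patch looks like the following. This is only a minimal sketch, not code from the driver; `buf` and `len` are hypothetical names for an already-allocated buffer and its size.

	dma_addr_t map;

	map = dma_map_single(&qdev->pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&qdev->pdev->dev, map))
		return -ENOMEM;	/* nothing was mapped, nothing to unmap */
	/* hand "map" to the hardware and wait for the operation to complete */
	dma_unmap_single(&qdev->pdev->dev, map, len, DMA_TO_DEVICE);

The direction argument must match on map and unmap, and dma_mapping_error() must be checked against the same struct device that did the mapping.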
{
struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);
- pci_dma_sync_single_for_cpu(qdev->pdev, lbq_desc->dma_addr,
- qdev->lbq_buf_size, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&qdev->pdev->dev, lbq_desc->dma_addr,
+ qdev->lbq_buf_size, DMA_FROM_DEVICE);
if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
ql_lbq_block_size(qdev)) {
/* last chunk of the master page */
- pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
- ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
+ ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
}
return lbq_desc;
return -ENOMEM;
skb_reserve(skb, QLGE_SB_PAD);
- sbq_desc->dma_addr = pci_map_single(qdev->pdev, skb->data,
+ sbq_desc->dma_addr = dma_map_single(&qdev->pdev->dev, skb->data,
SMALL_BUF_MAP_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, sbq_desc->dma_addr)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&qdev->pdev->dev, sbq_desc->dma_addr)) {
netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
dev_kfree_skb_any(skb);
return -EIO;
page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
if (unlikely(!page))
return -ENOMEM;
- dma_addr = pci_map_page(qdev->pdev, page, 0,
+ dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
ql_lbq_block_size(qdev),
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(qdev->pdev, dma_addr)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
__free_pages(page, qdev->lbq_buf_order);
netif_err(qdev, drv, qdev->ndev,
"PCI mapping failed.\n");
qdev->ndev,
"unmapping OAL area.\n");
}
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
dma_unmap_len(&tx_ring_desc->map[i],
maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
} else {
netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
"unmapping frag %d.\n", i);
- pci_unmap_page(qdev->pdev,
+ dma_unmap_page(&qdev->pdev->dev,
dma_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
dma_unmap_len(&tx_ring_desc->map[i],
- maplen), PCI_DMA_TODEVICE);
+ maplen), DMA_TO_DEVICE);
}
}
/*
* Map the skb buffer first.
*/
- map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netif_err(qdev, tx_queued, qdev->ndev,
"PCI mapping failed with error: %d\n", err);
* etc...
*/
/* Tack on the OAL in the eighth segment of IOCB. */
- map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
+ map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
sizeof(struct oal),
- PCI_DMA_TODEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ DMA_TO_DEVICE);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netif_err(qdev, tx_queued, qdev->ndev,
"PCI mapping outbound address list with error: %d\n",
}
skb_reserve(new_skb, NET_IP_ALIGN);
- pci_dma_sync_single_for_cpu(qdev->pdev, sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&qdev->pdev->dev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
skb_put_data(new_skb, skb->data, length);
* Headers fit nicely into a small buffer.
*/
sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
- pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, hdr_len);
skb_put(skb, hdr_len);
* buffer.
*/
sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
- pci_dma_sync_single_for_cpu(qdev->pdev,
- sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&qdev->pdev->dev,
+ sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE,
+ DMA_FROM_DEVICE);
skb_put_data(skb, sbq_desc->p.skb->data, length);
} else {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
skb = sbq_desc->p.skb;
ql_realign_skb(skb, length);
skb_put(skb, length);
- pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
SMALL_BUF_MAP_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
sbq_desc->p.skb = NULL;
}
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
"No skb available, drop the packet.\n");
return NULL;
}
- pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
+ dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
qdev->lbq_buf_size,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_reserve(skb, NET_IP_ALIGN);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
int size, i = 0;
sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
- pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
- SMALL_BUF_MAP_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
+ SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
/*
* This is a non-TCP/UDP IP frame, so
static void ql_free_shadow_space(struct ql_adapter *qdev)
{
if (qdev->rx_ring_shadow_reg_area) {
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->rx_ring_shadow_reg_area,
- qdev->rx_ring_shadow_reg_dma);
+ dma_free_coherent(&qdev->pdev->dev,
+ PAGE_SIZE,
+ qdev->rx_ring_shadow_reg_area,
+ qdev->rx_ring_shadow_reg_dma);
qdev->rx_ring_shadow_reg_area = NULL;
}
if (qdev->tx_ring_shadow_reg_area) {
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->tx_ring_shadow_reg_area,
- qdev->tx_ring_shadow_reg_dma);
+ dma_free_coherent(&qdev->pdev->dev,
+ PAGE_SIZE,
+ qdev->tx_ring_shadow_reg_area,
+ qdev->tx_ring_shadow_reg_dma);
qdev->tx_ring_shadow_reg_area = NULL;
}
}
static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
qdev->rx_ring_shadow_reg_area =
- pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
- &qdev->rx_ring_shadow_reg_dma);
+ dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ &qdev->rx_ring_shadow_reg_dma, GFP_ATOMIC);
if (!qdev->rx_ring_shadow_reg_area) {
netif_err(qdev, ifup, qdev->ndev,
"Allocation of RX shadow space failed.\n");
}
qdev->tx_ring_shadow_reg_area =
- pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
- &qdev->tx_ring_shadow_reg_dma);
+ dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ &qdev->tx_ring_shadow_reg_dma, GFP_ATOMIC);
if (!qdev->tx_ring_shadow_reg_area) {
netif_err(qdev, ifup, qdev->ndev,
"Allocation of TX shadow space failed.\n");
return 0;
err_wqp_sh_area:
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->rx_ring_shadow_reg_area,
- qdev->rx_ring_shadow_reg_dma);
+ dma_free_coherent(&qdev->pdev->dev,
+ PAGE_SIZE,
+ qdev->rx_ring_shadow_reg_area,
+ qdev->rx_ring_shadow_reg_dma);
return -ENOMEM;
}
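One point worth noting for the coherent allocations converted in this patch: in recent kernels dma_alloc_coherent() returns zeroed memory, so replacing pci_zalloc_consistent() with it preserves the zero-initialization. A minimal sketch of the alloc/free pairing, with hypothetical `size`, `buf` and `dma` names, assuming the same GFP_ATOMIC context as the calls above:

	void *buf;
	dma_addr_t dma;

	buf = dma_alloc_coherent(&qdev->pdev->dev, size, &dma, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;
	/* buf is zero-filled and coherently visible to CPU and device */
	dma_free_coherent(&qdev->pdev->dev, size, buf, dma);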
struct tx_ring *tx_ring)
{
if (tx_ring->wq_base) {
- pci_free_consistent(qdev->pdev, tx_ring->wq_size,
- tx_ring->wq_base, tx_ring->wq_base_dma);
+ dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
+ tx_ring->wq_base, tx_ring->wq_base_dma);
tx_ring->wq_base = NULL;
}
kfree(tx_ring->q);
struct tx_ring *tx_ring)
{
tx_ring->wq_base =
- pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
- &tx_ring->wq_base_dma);
+ dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
+ &tx_ring->wq_base_dma, GFP_ATOMIC);
if (!tx_ring->wq_base ||
tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
return 0;
err:
- pci_free_consistent(qdev->pdev, tx_ring->wq_size,
- tx_ring->wq_base, tx_ring->wq_base_dma);
+ dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
+ tx_ring->wq_base, tx_ring->wq_base_dma);
tx_ring->wq_base = NULL;
pci_alloc_err:
netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
&lbq->queue[lbq->next_to_clean];
if (lbq_desc->p.pg_chunk.offset == last_offset)
- pci_unmap_page(qdev->pdev, lbq_desc->dma_addr,
+ dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
ql_lbq_block_size(qdev),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
put_page(lbq_desc->p.pg_chunk.page);
lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
}
if (rx_ring->master_chunk.page) {
- pci_unmap_page(qdev->pdev, rx_ring->chunk_dma_addr,
- ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
+ ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
put_page(rx_ring->master_chunk.page);
rx_ring->master_chunk.page = NULL;
}
return;
}
if (sbq_desc->p.skb) {
- pci_unmap_single(qdev->pdev, sbq_desc->dma_addr,
+ dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
SMALL_BUF_MAP_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
}
__le64 *buf_ptr;
int i;
- bq->base = pci_alloc_consistent(qdev->pdev, QLGE_BQ_SIZE,
- &bq->base_dma);
+ bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
+ &bq->base_dma, GFP_ATOMIC);
if (!bq->base) {
netif_err(qdev, ifup, qdev->ndev,
"ring %u %s allocation failed.\n", rx_ring->cq_id,
{
/* Free the small buffer queue. */
if (rx_ring->sbq.base) {
- pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
- rx_ring->sbq.base, rx_ring->sbq.base_dma);
+ dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
+ rx_ring->sbq.base, rx_ring->sbq.base_dma);
rx_ring->sbq.base = NULL;
}
/* Free the large buffer queue. */
if (rx_ring->lbq.base) {
- pci_free_consistent(qdev->pdev, QLGE_BQ_SIZE,
- rx_ring->lbq.base, rx_ring->lbq.base_dma);
+ dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
+ rx_ring->lbq.base, rx_ring->lbq.base_dma);
rx_ring->lbq.base = NULL;
}
/* Free the rx queue. */
if (rx_ring->cq_base) {
- pci_free_consistent(qdev->pdev,
- rx_ring->cq_size,
- rx_ring->cq_base, rx_ring->cq_base_dma);
+ dma_free_coherent(&qdev->pdev->dev,
+ rx_ring->cq_size,
+ rx_ring->cq_base, rx_ring->cq_base_dma);
rx_ring->cq_base = NULL;
}
}
* Allocate the completion queue for this rx_ring.
*/
rx_ring->cq_base =
- pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
- &rx_ring->cq_base_dma);
+ dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
+ &rx_ring->cq_base_dma, GFP_ATOMIC);
if (!rx_ring->cq_base) {
netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
}
pci_set_master(pdev);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
set_bit(QL_DMA64, &qdev->flags);
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}
if (err) {
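The 64-then-32-bit mask setup above could also be collapsed with dma_set_mask_and_coherent(), which sets the streaming and coherent masks in one call. A sketch of that alternative, not what this patch does; the existing `if (err)` error handling would follow unchanged:

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err)
		set_bit(QL_DMA64, &qdev->flags);
	else
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));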