entry, le32_to_cpu(txdesc->status));
/* Free the original skb. */
if (mdp->tx_skbuff[entry]) {
- dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+ dma_unmap_single(&mdp->pdev->dev,
+ le32_to_cpu(txdesc->addr),
le32_to_cpu(txdesc->len) >> 16,
DMA_TO_DEVICE);
dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
[...]

if (mdp->rx_skbuff[i]) {
struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
- dma_unmap_single(&ndev->dev,
+ dma_unmap_single(&mdp->pdev->dev,
le32_to_cpu(rxdesc->addr),
ALIGN(mdp->rx_buf_sz, 32),
DMA_FROM_DEVICE);
[...]

/* The size of the buffer is a multiple of 32 bytes. */
buf_len = ALIGN(mdp->rx_buf_sz, 32);
- dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
+ dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
kfree_skb(skb);
break;
}
[...]

mdp->rx_skbuff[entry] = NULL;
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
- dma_unmap_single(&ndev->dev, dma_addr,
+ dma_unmap_single(&mdp->pdev->dev, dma_addr,
ALIGN(mdp->rx_buf_sz, 32),
DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
[...]

if (skb == NULL)
break; /* Better luck next round. */
sh_eth_set_receive_align(skb);
- dma_addr = dma_map_single(&ndev->dev, skb->data,
+ dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
buf_len, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
kfree_skb(skb);
break;
}
[...]

/* soft swap. */
if (!mdp->cd->hw_swap)
sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
- dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len,
+ dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
kfree_skb(skb);
return NETDEV_TX_OK;
}
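
All of the hunks above make the same substitution: streaming DMA mappings are created, checked, and torn down against the platform device (&mdp->pdev->dev), i.e. the device that actually performs the DMA and carries a DMA mask, instead of the net_device's embedded struct device (&ndev->dev). As a minimal sketch of the resulting pattern (not part of the patch; sh_eth_private, pdev, and rx_buf_sz are the driver's existing fields, while the helper name sh_eth_map_rx_buf() is hypothetical and only illustrates the call shape):

/*
 * Sketch only: the mapping pattern used throughout the patch.
 */
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

#include "sh_eth.h"	/* struct sh_eth_private */

static dma_addr_t sh_eth_map_rx_buf(struct sh_eth_private *mdp,
				    struct sk_buff *skb)
{
	/* Map against the platform device that owns the DMA hardware,
	 * not against &ndev->dev, which has no DMA mask or dma_ops.
	 */
	dma_addr_t addr = dma_map_single(&mdp->pdev->dev, skb->data,
					 ALIGN(mdp->rx_buf_sz, 32),
					 DMA_FROM_DEVICE);

	if (dma_mapping_error(&mdp->pdev->dev, addr))
		return DMA_MAPPING_ERROR;	/* caller frees the skb */

	return addr;
}

The unmap side mirrors it, passing the same device, size, and direction to dma_unmap_single(), as in the rx and tx hunks above.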