dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
}
- if (unlikely(dma_mapping_error(ring->dev, dma))) {
+ if (unlikely(dma_mapping_error(dev, dma))) {
ring->stats.sw_err_cnt++;
return -ENOMEM;
}
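
For context, a minimal sketch of the pattern this hunk settles on; the helper name map_tx_frag() is hypothetical, but skb_frag_dma_map() and dma_mapping_error() are the real DMA API calls. The check should be made against the same struct device the mapping was created with, and here the local dev already holds that device, so using it directly is consistent:

/* Sketch only: map one skb fragment for transmit and validate the
 * mapping against the same device that created it.
 */
static int map_tx_frag(struct device *dev, struct hns3_enet_ring *ring,
		       const skb_frag_t *frag, unsigned int size,
		       dma_addr_t *dma)
{
	*dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, *dma))) {
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}
	return 0;
}
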
struct hns3_enet_ring *next;
struct hns3_enet_tqp_vector *tqp_vector;
struct hnae3_queue *tqp;
- char ring_name[HNS3_RING_NAME_LEN];
struct device *dev; /* will be used for DMA mapping of descriptors */
/* statistic */
struct ring_stats stats;
dma_addr_t desc_dma_addr;
u32 buf_size; /* size for hnae_desc->addr, preset by AE */
u16 desc_num; /* total number of desc */
- u16 max_desc_num_per_pkt;
- u16 max_raw_data_sz_per_desc;
- u16 max_pkt_size;
int next_to_use; /* idx of next spare desc */
/* idx of latest sent desc; the ring is empty when equal to
 * next_to_use
 */
int next_to_clean;
u32 flag; /* ring attribute */
- int numa_node;
- cpumask_t affinity_mask;
-
int pending_buf;
struct sk_buff *skb;
struct sk_buff *tail_skb;
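
The next_to_use/next_to_clean pair above encodes ring occupancy with the usual one-reserved-slot convention. A hedged sketch of the arithmetic (ring_space() here is illustrative, not necessarily the driver's own helper):

/* Illustrative only: free descriptors in a ring driven by
 * next_to_use (producer index) and next_to_clean (consumer index).
 * One slot stays unused so that next_to_use == next_to_clean means
 * empty rather than full.
 */
static inline int ring_space(int next_to_use, int next_to_clean,
			     u16 desc_num)
{
	return (next_to_clean - next_to_use - 1 + desc_num) % desc_num;
}
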
#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
(tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
-#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
+#define ring_to_dev(ring) ((ring)->dev)
#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
DMA_TO_DEVICE : DMA_FROM_DEVICE)
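
A hedged usage sketch tying the two macros together; hns3_map_buf() is hypothetical, but dma_map_single() and dma_mapping_error() are the standard DMA API. With dev cached on the ring, ring_to_dev() becomes a single field load instead of the pointer chase through tqp->handle->pdev->dev:

/* Sketch only: map a buffer using the ring's cached device and the
 * direction implied by the ring type.
 */
static int hns3_map_buf(struct hns3_enet_ring *ring, void *buf,
			unsigned int size, dma_addr_t *dma)
{
	struct device *dev = ring_to_dev(ring);

	*dma = dma_map_single(dev, buf, size, ring_to_dma_dir(ring));
	if (unlikely(dma_mapping_error(dev, *dma)))
		return -ENOMEM;

	return 0;
}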