#define IONIC_LIF_NAME_MAX_SZ 32
struct ionic_lif {
- char name[IONIC_LIF_NAME_MAX_SZ];
- struct list_head list;
struct net_device *netdev;
DECLARE_BITMAP(state, IONIC_LIF_F_STATE_SIZE);
struct ionic *ionic;
- bool registered;
unsigned int index;
unsigned int hw_index;
- unsigned int kern_pid;
- u64 __iomem *kern_dbpage;
struct mutex queue_lock; /* lock for queue structures */
spinlock_t adminq_lock; /* lock for AdminQ operations */
struct ionic_qcq *adminqcq;
struct ionic_tx_stats *txqstats;
struct ionic_qcq **rxqcqs;
struct ionic_rx_stats *rxqstats;
+ struct ionic_deferred deferred;
+ struct work_struct tx_timeout_work;
u64 last_eid;
+ unsigned int kern_pid;
+ u64 __iomem *kern_dbpage;
unsigned int neqs;
unsigned int nxqs;
unsigned int ntxq_descs;
unsigned int nrxq_descs;
u32 rx_copybreak;
- u32 tx_budget;
unsigned int rx_mode;
u64 hw_features;
+ bool registered;
bool mc_overflow;
- unsigned int nmcast;
bool uc_overflow;
u16 lif_type;
+ unsigned int nmcast;
unsigned int nucast;
+ char name[IONIC_LIF_NAME_MAX_SZ];
union ionic_lif_identity *identity;
struct ionic_lif_info *info;
u32 rss_ind_tbl_sz;
struct ionic_rx_filters rx_filters;
- struct ionic_deferred deferred;
- unsigned long *dbid_inuse;
- unsigned int dbid_count;
- struct dentry *dentry;
u32 rx_coalesce_usecs; /* what the user asked for */
u32 rx_coalesce_hw; /* what the hw is using */
u32 tx_coalesce_usecs; /* what the user asked for */
u32 tx_coalesce_hw; /* what the hw is using */
+ unsigned long *dbid_inuse;
+ unsigned int dbid_count;
- struct work_struct tx_timeout_work;
+ struct dentry *dentry;
};
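/*
 * Illustrative sketch, not from the patch: the q->dev and q->max_sg_elems
 * shortcuts used in the hunks below assume struct ionic_queue now caches
 * the device pointer and the queue type's SG element limit. They would be
 * filled once at queue setup (exact location assumed), roughly:
 *
 *	q->dev = lif->ionic->dev;
 *	q->max_sg_elems = lif->qtype_info[type].max_sg_elems;
 *
 * (type stands for the queue's type, e.g. IONIC_QTYPE_RXQ), so the hot
 * paths no longer chase q->lif->ionic->dev or q->lif->qtype_info[] for
 * every packet.
 */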
struct ionic_queue_params {
struct device *dev;
netdev = lif->netdev;
- dev = lif->ionic->dev;
+ dev = q->dev;
stats = q_to_rx_stats(q);
if (unlikely(!buf_info)) {
struct ionic_buf_info *buf_info)
{
struct net_device *netdev = q->lif->netdev;
- struct device *dev = q->lif->ionic->dev;
+ struct device *dev = q->dev;
if (unlikely(!buf_info)) {
net_err_ratelimited("%s: %s invalid buf_info in free\n",
struct ionic_cq_info *cq_info)
{
struct ionic_rxq_comp *comp = cq_info->cq_desc;
- struct device *dev = q->lif->ionic->dev;
struct ionic_buf_info *buf_info;
+ struct device *dev = q->dev;
struct sk_buff *skb;
unsigned int i;
u16 frag_len;
struct ionic_cq_info *cq_info)
{
struct ionic_rxq_comp *comp = cq_info->cq_desc;
- struct device *dev = q->lif->ionic->dev;
struct ionic_buf_info *buf_info;
+ struct device *dev = q->dev;
struct sk_buff *skb;
u16 len;
struct ionic_rxq_sg_elem *sg_elem;
struct ionic_buf_info *buf_info;
struct ionic_rxq_desc *desc;
- unsigned int max_sg_elems;
unsigned int remain_len;
unsigned int frag_len;
unsigned int nfrags;
unsigned int len;
len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
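/* Illustrative: with a 1500-byte MTU this is 1500 + 14 (ETH_HLEN) +
 * 4 (VLAN_HLEN) = 1518 bytes of receive space to spread across the
 * first buffer and the sg buffers filled below.
 */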
- max_sg_elems = q->lif->qtype_info[IONIC_QTYPE_RXQ].max_sg_elems;
for (i = ionic_q_space_avail(q); i; i--) {
nfrags = 0;
/* fill sg descriptors - buf[1..n] */
sg_desc = desc_info->sg_desc;
- for (j = 0; remain_len > 0 && j < max_sg_elems; j++) {
+ for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
sg_elem = &sg_desc->elems[j];
if (!buf_info->page) { /* alloc a new sg buffer? */
if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
}
/* clear end sg element as a sentinel */
- if (j < max_sg_elems) {
+ if (j < q->max_sg_elems) {
sg_elem = &sg_desc->elems[j];
memset(sg_elem, 0, sizeof(*sg_elem));
}
idev = &lif->ionic->idev;
txcq = &lif->txqcqs[qi]->cq;
- tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
+ tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
ionic_tx_service, NULL, NULL);
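/* TX completions are drained with the fixed IONIC_TX_BUDGET_DEFAULT
 * (tx_budget is gone from struct ionic_lif above); the RX pass below is
 * the one bounded by the NAPI budget argument.
 */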
rx_work_done = ionic_cq_service(rxcq, budget,
void *data, size_t len)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct device *dev = q->lif->ionic->dev;
+ struct device *dev = q->dev;
dma_addr_t dma_addr;
dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
size_t offset, size_t len)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct device *dev = q->lif->ionic->dev;
+ struct device *dev = q->dev;
dma_addr_t dma_addr;
dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
struct ionic_txq_sg_elem *elem = sg_desc->elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_txq_desc *desc = desc_info->desc;
- struct device *dev = q->lif->ionic->dev;
+ struct device *dev = q->dev;
u8 opcode, flags, nsge;
u16 queue_index;
unsigned int i;
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_desc_info *rewind_desc_info;
- struct device *dev = q->lif->ionic->dev;
struct ionic_txq_sg_elem *elem;
+ struct device *dev = q->dev;
struct ionic_txq_desc *desc;
unsigned int frag_left = 0;
unsigned int offset = 0;
{
struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct device *dev = q->lif->ionic->dev;
+ struct device *dev = q->dev;
dma_addr_t dma_addr;
bool has_vlan;
u8 flags = 0;
{
struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct device *dev = q->lif->ionic->dev;
+ struct device *dev = q->dev;
dma_addr_t dma_addr;
bool has_vlan;
u8 flags = 0;
unsigned int len_left = skb->len - skb_headlen(skb);
struct ionic_txq_sg_elem *elem = sg_desc->elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct device *dev = q->lif->ionic->dev;
+ struct device *dev = q->dev;
dma_addr_t dma_addr;
skb_frag_t *frag;
u16 len;
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
- int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
int err;
return (skb->len / skb_shinfo(skb)->gso_size) + 1;
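/* Illustrative arithmetic: a 9000-byte TSO skb with gso_size 1500 is
 * estimated at 9000 / 1500 + 1 = 7 descriptors, roughly one per
 * MSS-sized segment plus one for slack.
 */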
/* If non-TSO, just need 1 desc and nr_frags sg elems */
- if (skb_shinfo(skb)->nr_frags <= sg_elems)
+ if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
return 1;
/* Too many frags, so linearize */
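/* The linearize path itself is not shown in this excerpt; presumably it
 * collapses the frags with skb_linearize(skb) so that a single descriptor
 * with no sg elements suffices.
 */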