#include "cudbg_lib_common.h"
#include "cudbg_entity.h"
#include "cudbg_lib.h"
+#include "cxgb4_tc_mqprio.h"
/* generic seq_file support for showing a table of size rows x width. */
static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
static int sge_qinfo_show(struct seq_file *seq, void *v)
{
- int eth_entries, ctrl_entries, eo_entries = 0;
+ int eth_entries, ctrl_entries, eohw_entries = 0, eosw_entries = 0;
int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 };
int uld_txq_entries[CXGB4_TX_MAX] = { 0 };
const struct sge_uld_txq_info *utxq_info;
const struct sge_uld_rxq_info *urxq_info;
+ struct cxgb4_tc_port_mqprio *port_mqprio;
struct adapter *adap = seq->private;
- int i, n, r = (uintptr_t)v - 1;
+ int i, j, n, r = (uintptr_t)v - 1;
struct sge *s = &adap->sge;
eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
- if (adap->sge.eohw_txq)
- eo_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
-
- mutex_lock(&uld_mutex);
- if (s->uld_txq_info)
- for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++)
- uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i);
-
- if (s->uld_rxq_info) {
- for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) {
- uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i);
- uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i);
- }
- }
if (r)
seq_putc(seq, '\n');
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- goto unlock;
+ goto out;
}
r -= eth_entries;
- if (r < eo_entries) {
+ if (!adap->tc_mqprio)
+ goto skip_mqprio;
+
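+	/* TC-MQPRIO state (including the EOSW/EOHW queues dumped
+	 * below) is only stable while mqprio_mutex is held; a zero
+	 * refcount means no offload is configured, so skip ahead.
+	 */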
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
+ if (!refcount_read(&adap->tc_mqprio->refcnt)) {
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ goto skip_mqprio;
+ }
+
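+	/* ETHOFLD hardware queue sets are displayed 4 per row. */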
+ eohw_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
+ if (r < eohw_entries) {
int base_qset = r * 4;
const struct sge_ofld_rxq *rx = &s->eohw_rxq[base_qset];
const struct sge_eohw_txq *tx = &s->eohw_txq[base_qset];
RL("FLLow:", fl.low);
RL("FLStarving:", fl.starving);
- goto unlock;
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ goto out;
+ }
+
+ r -= eohw_entries;
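+	/* A port's EOSW TXQ count is the sum of the queue counts
+	 * across all traffic classes configured via TC-MQPRIO.
+	 */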
+ for (j = 0; j < adap->params.nports; j++) {
+ int entries;
+ u8 tc;
+
+ port_mqprio = &adap->tc_mqprio->port_mqprio[j];
+ entries = 0;
+ for (tc = 0; tc < port_mqprio->mqprio.qopt.num_tc; tc++)
+ entries += port_mqprio->mqprio.qopt.count[tc];
+
+ if (!entries)
+ continue;
+
+ eosw_entries = DIV_ROUND_UP(entries, 4);
+ if (r < eosw_entries) {
+ const struct sge_eosw_txq *tx;
+
+ n = min(4, entries - 4 * r);
+ tx = &port_mqprio->eosw_txq[4 * r];
+
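+			/* Dump the software context the driver tracks
+			 * for each EOSW TXQ in this row.
+			 */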
+ S("QType:", "EOSW-TXQ");
+ S("Interface:",
+ adap->port[j] ? adap->port[j]->name : "N/A");
+ T("EOTID:", hwtid);
+ T("HWQID:", hwqid);
+ T("State:", state);
+ T("Size:", ndesc);
+ T("In-Use:", inuse);
+ T("Credits:", cred);
+ T("Compl:", ncompl);
+ T("Last-Compl:", last_compl);
+ T("PIDX:", pidx);
+ T("Last-PIDX:", last_pidx);
+ T("CIDX:", cidx);
+ T("Last-CIDX:", last_cidx);
+ T("FLOWC-IDX:", flowc_idx);
+
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ goto out;
+ }
+
+ r -= eosw_entries;
+ }
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+
+skip_mqprio:
+ if (!is_uld(adap))
+ goto skip_uld;
+
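+	/* ULD queue info is protected by uld_mutex. */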
+ mutex_lock(&uld_mutex);
+ if (s->uld_txq_info)
+ for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++)
+ uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i);
+
+ if (s->uld_rxq_info) {
+ for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) {
+ uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i);
+ uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i);
+ }
}
- r -= eo_entries;
if (r < uld_txq_entries[CXGB4_TX_OFLD]) {
const struct sge_uld_txq *tx;
}
r -= uld_txq_entries[CXGB4_TX_CRYPTO];
+ mutex_unlock(&uld_mutex);
+
+skip_uld:
if (r < ctrl_entries) {
const struct sge_ctrl_txq *tx = &s->ctrlq[r * 4];
TL("TxQFull:", q.stops);
TL("TxQRestarts:", q.restarts);
- goto unlock;
+ goto out;
}
r -= ctrl_entries;
seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
s->counter_val[evtq->pktcnt_idx]);
- goto unlock;
+ goto out;
}
-unlock:
- mutex_unlock(&uld_mutex);
#undef R
#undef RL
#undef T
#undef R3
#undef T3
#undef S3
+unlock:
+	mutex_unlock(&uld_mutex);
+
+out:
return 0;
}
static int sge_queue_entries(const struct adapter *adap)
{
- int tot_uld_entries = 0;
- int i;
+ int i, tot_uld_entries = 0, eohw_entries = 0, eosw_entries = 0;
+
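+	/* Keep this accounting in sync with the sections dumped by
+	 * sge_qinfo_show(): EOHW and EOSW rows are counted under
+	 * mqprio_mutex, 4 queues per row.
+	 */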
+ if (adap->tc_mqprio) {
+ struct cxgb4_tc_port_mqprio *port_mqprio;
+ u8 tc;
+
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
+ if (adap->sge.eohw_txq)
+ eohw_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4);
+
+ for (i = 0; i < adap->params.nports; i++) {
+ u32 entries = 0;
+
+ port_mqprio = &adap->tc_mqprio->port_mqprio[i];
+ for (tc = 0; tc < port_mqprio->mqprio.qopt.num_tc; tc++)
+ entries += port_mqprio->mqprio.qopt.count[tc];
+
+ if (entries)
+ eosw_entries += DIV_ROUND_UP(entries, 4);
+ }
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ }
if (!is_uld(adap))
goto lld_only;
lld_only:
return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
- (adap->sge.eohw_txq ? DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) +
- tot_uld_entries +
+ eohw_entries + eosw_entries + tot_uld_entries +
DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
}
if (t->nhpftids)
seq_printf(seq, "HPFTID range: %u..%u\n", t->hpftid_base,
t->hpftid_base + t->nhpftids - 1);
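+	/* ETHOFLD TIDs; eotids_in_use tracks how many are currently
+	 * allocated.
+	 */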
+ if (t->neotids)
+ seq_printf(seq, "EOTID range: %u..%u, in use: %u\n",
+ t->eotid_base, t->eotid_base + t->neotids - 1,
+ atomic_read(&t->eotids_in_use));
if (t->ntids)
seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
int cxgb4_setup_tc_mqprio(struct net_device *dev,
struct tc_mqprio_qopt_offload *mqprio)
{
+ struct adapter *adap = netdev2adap(dev);
bool needs_bring_up = false;
int ret;
if (ret)
return ret;
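+	/* Hold mqprio_mutex across the reconfiguration so concurrent
+	 * debugfs dumps never see queues in a half-torn-down state.
+	 */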
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
+
/* To configure tc params, the current allocated EOTIDs must
* be freed up. However, they can't be freed up if there's
* traffic running on the interface. So, ensure interface is
if (needs_bring_up)
cxgb_open(dev);
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
return ret;
}
if (!adap->tc_mqprio || !adap->tc_mqprio->port_mqprio)
return;
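+	/* Disable offload on all ports under mqprio_mutex. */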
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
for_each_port(adap, i) {
dev = adap->port[i];
		if (!dev)
			continue;

		cxgb4_mqprio_disable_offload(dev);
}
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
}
int cxgb4_init_tc_mqprio(struct adapter *adap)
goto out_free_mqprio;
}
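+	/* mqprio_mutex serializes TC-MQPRIO configuration against
+	 * the debugfs queue dumps.
+	 */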
+ mutex_init(&tc_mqprio->mqprio_mutex);
+
tc_mqprio->port_mqprio = tc_port_mqprio;
for (i = 0; i < adap->params.nports; i++) {
port_mqprio = &tc_mqprio->port_mqprio[i];
u8 i;
if (adap->tc_mqprio) {
+ mutex_lock(&adap->tc_mqprio->mqprio_mutex);
if (adap->tc_mqprio->port_mqprio) {
for (i = 0; i < adap->params.nports; i++) {
struct net_device *dev = adap->port[i];
}
kfree(adap->tc_mqprio->port_mqprio);
}
+ mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
kfree(adap->tc_mqprio);
}
}