if (gc->max_num_queues > resp.max_rq)
gc->max_num_queues = resp.max_rq;
+ /* The Hardware Channel (HWC) uses 1 MSI-X interrupt */
+ if (gc->max_num_queues > gc->num_msix_usable - 1)
+ gc->max_num_queues = gc->num_msix_usable - 1;
+
return 0;
}
struct gdma_resource *r;
unsigned int msi_index;
unsigned long flags;
- int err;
+ struct device *dev;
+ int err = 0;
gc = gd->gdma_context;
r = &gc->msix_resource;
+ dev = gc->dev;
spin_lock_irqsave(&r->lock, flags);
msi_index = find_first_zero_bit(r->map, r->size);
- if (msi_index >= r->size) {
+ if (msi_index >= r->size || msi_index >= gc->num_msix_usable) {
err = -ENOSPC;
} else {
bitmap_set(r->map, msi_index, 1);
queue->eq.msix_index = msi_index;
- err = 0;
}
spin_unlock_irqrestore(&r->lock, flags);
- if (err)
- return err;
+ if (err) {
+ dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u, nMSI:%u",
+ err, msi_index, r->size, gc->num_msix_usable);
+ return err;
+ }
- WARN_ON(msi_index >= gc->num_msix_usable);
gic = &gc->irq_contexts[msi_index];
req.protocol_ver_min = GDMA_PROTOCOL_FIRST;
req.protocol_ver_max = GDMA_PROTOCOL_LAST;
+ req.gd_drv_cap_flags1 = GDMA_DRV_CAP_FLAGS1;
+ req.gd_drv_cap_flags2 = GDMA_DRV_CAP_FLAGS2;
+ req.gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3;
+ req.gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4;
+
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n",
if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
max_queues_per_port = MANA_MAX_NUM_QUEUES;
- max_irqs = max_queues_per_port * MAX_PORTS_IN_MANA_DEV;
-
/* Need 1 interrupt for the Hardware communication Channel (HWC) */
- max_irqs++;
+ max_irqs = max_queues_per_port + 1;
nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
if (nvec < 0)
int bar = 0;
int err;
+ /* Each port has 2 CQs, each CQ has at most 1 EQE at a time */
+ BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);
+
err = pci_enable_device(pdev);
if (err)
return -ENXIO;
resp.hdr.status);
}
-static void mana_destroy_eq(struct gdma_context *gc,
- struct mana_port_context *apc)
+static void mana_destroy_eq(struct mana_context *ac)
{
+ struct gdma_context *gc = ac->gdma_dev->gdma_context;
struct gdma_queue *eq;
int i;
- if (!apc->eqs)
+ if (!ac->eqs)
return;
- for (i = 0; i < apc->num_queues; i++) {
- eq = apc->eqs[i].eq;
+ for (i = 0; i < gc->max_num_queues; i++) {
+ eq = ac->eqs[i].eq;
if (!eq)
continue;
mana_gd_destroy_queue(gc, eq);
}
- kfree(apc->eqs);
- apc->eqs = NULL;
+ kfree(ac->eqs);
+ ac->eqs = NULL;
}
-static int mana_create_eq(struct mana_port_context *apc)
+static int mana_create_eq(struct mana_context *ac)
{
- struct gdma_dev *gd = apc->ac->gdma_dev;
+ struct gdma_dev *gd = ac->gdma_dev;
+ struct gdma_context *gc = gd->gdma_context;
struct gdma_queue_spec spec = {};
int err;
int i;
- apc->eqs = kcalloc(apc->num_queues, sizeof(struct mana_eq),
- GFP_KERNEL);
- if (!apc->eqs)
+ ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
+ GFP_KERNEL);
+ if (!ac->eqs)
return -ENOMEM;
spec.type = GDMA_EQ;
spec.monitor_avl_buf = false;
spec.queue_size = EQ_SIZE;
spec.eq.callback = NULL;
- spec.eq.context = apc->eqs;
+ spec.eq.context = ac->eqs;
spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
- spec.eq.ndev = apc->ndev;
- for (i = 0; i < apc->num_queues; i++) {
- err = mana_gd_create_mana_eq(gd, &spec, &apc->eqs[i].eq);
+ for (i = 0; i < gc->max_num_queues; i++) {
+ err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
if (err)
goto out;
}
return 0;
out:
- mana_destroy_eq(gd->gdma_context, apc);
+ mana_destroy_eq(ac);
return err;
}
static int mana_create_txq(struct mana_port_context *apc,
struct net_device *net)
{
- struct gdma_dev *gd = apc->ac->gdma_dev;
+ struct mana_context *ac = apc->ac;
+ struct gdma_dev *gd = ac->gdma_dev;
struct mana_obj_spec wq_spec;
struct mana_obj_spec cq_spec;
struct gdma_queue_spec spec;
spec.monitor_avl_buf = false;
spec.queue_size = cq_size;
spec.cq.callback = mana_schedule_napi;
- spec.cq.parent_eq = apc->eqs[i].eq;
+ spec.cq.parent_eq = ac->eqs[i].eq;
spec.cq.context = cq;
err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
if (err)
static int mana_add_rx_queues(struct mana_port_context *apc,
struct net_device *ndev)
{
+ struct mana_context *ac = apc->ac;
struct mana_rxq *rxq;
int err = 0;
int i;
for (i = 0; i < apc->num_queues; i++) {
- rxq = mana_create_rxq(apc, i, &apc->eqs[i], ndev);
+ rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
if (!rxq) {
err = -ENOMEM;
goto out;
int mana_alloc_queues(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
- struct gdma_dev *gd = apc->ac->gdma_dev;
int err;
- err = mana_create_eq(apc);
- if (err)
- return err;
-
err = mana_create_vport(apc, ndev);
if (err)
- goto destroy_eq;
+ return err;
err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
if (err)
destroy_vport:
mana_destroy_vport(apc);
-destroy_eq:
- mana_destroy_eq(gd->gdma_context, apc);
return err;
}
mana_destroy_vport(apc);
- mana_destroy_eq(apc->ac->gdma_dev->gdma_context, apc);
-
return 0;
}
apc->ac = ac;
apc->ndev = ndev;
apc->max_queues = gc->max_num_queues;
- apc->num_queues = min_t(uint, gc->max_num_queues, MANA_MAX_NUM_QUEUES);
+ apc->num_queues = gc->max_num_queues;
apc->port_handle = INVALID_MANA_HANDLE;
apc->port_idx = port_idx;
ac->num_ports = 1;
gd->driver_data = ac;
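+ /* The EQs are shared by all ports of the device; create them once at probe */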
+ err = mana_create_eq(ac);
+ if (err)
+ goto out;
+
err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
MANA_MICRO_VERSION, &ac->num_ports);
if (err)
free_netdev(ndev);
}
+
+ mana_destroy_eq(ac);
+
out:
mana_gd_deregister_device(gd);
gd->driver_data = NULL;