#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
int i;
- u16 reg_idx;
+ u16 reg_idx, pool;
u8 tcs = adapter->hw_tcs;
/* verify we have DCB queueing enabled before proceeding */
if (tcs <= 1)
return false;
/* start at VMDq register offset for SR-IOV enabled setups */
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
- for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
+ for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
/* If we are greater than indices move to next pool */
- if ((reg_idx & ~vmdq->mask) >= tcs)
+ if ((reg_idx & ~vmdq->mask) >= tcs) {
+ pool++;
reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ }
adapter->rx_ring[i]->reg_idx = reg_idx;
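+ /* Only rings in the default pool are backed by the PF netdev;
+ * rings in other pools stay NULL until an offloaded macvlan
+ * claims them in ixgbe_fwd_ring_up().
+ */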
+ adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
}
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
adapter->rx_ring[i]->reg_idx = reg_idx;
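+ /* FCoE queues always belong to the PF, so they keep the PF netdev */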
+ adapter->rx_ring[i]->netdev = adapter->netdev;
reg_idx++;
}
for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
adapter->tx_ring[offset + i]->reg_idx = tx_idx;
adapter->rx_ring[offset + i]->reg_idx = rx_idx;
+ adapter->rx_ring[offset + i]->netdev = adapter->netdev;
adapter->tx_ring[offset + i]->dcb_tc = tc;
adapter->rx_ring[offset + i]->dcb_tc = tc;
}
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
+ u16 reg_idx, pool;
int i;
- u16 reg_idx;
/* only proceed if VMDq is enabled */
if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
return false;
/* start at VMDq register offset for SR-IOV enabled setups */
+ pool = 0;
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
/* Allow first FCoE queue to be mapped as RSS */
if (fcoe->offset && (i > fcoe->offset))
break;
#endif
/* If we are greater than indices move to next pool */
- if ((reg_idx & ~vmdq->mask) >= rss->indices)
+ if ((reg_idx & ~vmdq->mask) >= rss->indices) {
+ pool++;
reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ }
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = pool ? NULL : adapter->netdev;
}
#ifdef IXGBE_FCOE
/* FCoE uses a linear block of queues so just assigning 1:1 */
- for (; i < adapter->num_rx_queues; i++, reg_idx++)
+ for (; i < adapter->num_rx_queues; i++, reg_idx++) {
adapter->rx_ring[i]->reg_idx = reg_idx;
+ adapter->rx_ring[i]->netdev = adapter->netdev;
+ }
#endif
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
{
int i, reg_idx;
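+ /* without VMDq pools every Rx ring is owned by the PF netdev */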
- for (i = 0; i < adapter->num_rx_queues; i++)
+ for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->rx_ring[i]->reg_idx = i;
+ adapter->rx_ring[i]->netdev = adapter->netdev;
+ }
for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
adapter->tx_ring[i]->reg_idx = reg_idx;
for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
adapter->xdp_ring[i]->reg_idx = reg_idx;
if (IS_ERR(skb))
return true;
- /* verify that the packet does not have any known errors */
- if (unlikely(ixgbe_test_staterr(rx_desc,
- IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
- !(netdev->features & NETIF_F_RXALL))) {
+ /* Verify netdev is present, and that the packet does not have
+ * any errors that would be unacceptable to the netdev.
+ */
+ if (!netdev ||
+ (unlikely(ixgbe_test_staterr(rx_desc,
+ IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
+ !(netdev->features & NETIF_F_RXALL)))) {
dev_kfree_skb_any(skb);
return true;
}
rx_ring->next_to_use = 0;
}
-static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
- struct ixgbe_ring *rx_ring)
-{
- struct ixgbe_adapter *adapter = vadapter->real_adapter;
-
- /* shutdown specific queue receive and wait for dma to settle */
- ixgbe_disable_rx_queue(adapter, rx_ring);
- usleep_range(10000, 20000);
- ixgbe_irq_disable_queues(adapter, BIT_ULL(rx_ring->queue_index));
- ixgbe_clean_rx_ring(rx_ring);
-}
-
-static int ixgbe_fwd_ring_down(struct net_device *vdev,
- struct ixgbe_fwd_adapter *accel)
-{
- struct ixgbe_adapter *adapter = accel->real_adapter;
- unsigned int rxbase = accel->rx_base_queue;
- int i;
-
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
- ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
- adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
- }
-
- return 0;
-}
-
static int ixgbe_fwd_ring_up(struct net_device *vdev,
struct ixgbe_fwd_adapter *accel)
{
accel->tx_base_queue = baseq;
for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
- ixgbe_disable_fwd_ring(accel, adapter->rx_ring[baseq + i]);
-
- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
adapter->rx_ring[baseq + i]->netdev = vdev;
- ixgbe_configure_rx_ring(adapter, adapter->rx_ring[baseq + i]);
- }
+
+ /* Guarantee all rings are updated before we update the
+ * MAC address filter.
+ */
+ wmb();
/* ixgbe_add_mac_filter will return an index if it succeeds, so we
* need to only treat it as an error value if it is negative.
*/
err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
VMDQ_P(accel->pool));
- if (err < 0)
- goto fwd_queue_err;
+ if (err >= 0) {
+ ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
+ return 0;
+ }
+
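+ /* adding the MAC filter failed; unwind by clearing the ring
+ * netdev pointers so the pool's rings are left unused
+ */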
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
+ adapter->rx_ring[baseq + i]->netdev = NULL;
- ixgbe_macvlan_set_rx_mode(vdev, VMDQ_P(accel->pool), adapter);
- return 0;
-fwd_queue_err:
- ixgbe_fwd_ring_down(vdev, accel);
return err;
}
static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
{
- struct ixgbe_fwd_adapter *fwd_adapter = priv;
- struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
- unsigned int limit;
+ struct ixgbe_fwd_adapter *accel = priv;
+ struct ixgbe_adapter *adapter = accel->real_adapter;
+ unsigned int rxbase = accel->rx_base_queue;
+ unsigned int limit, i;
- clear_bit(fwd_adapter->pool, adapter->fwd_bitmask);
+ /* delete unicast filter associated with offloaded interface */
+ ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
+ VMDQ_P(accel->pool));
+ /* disable ability to receive packets for this pool */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(accel->pool), 0);
+
+ /* Allow remaining Rx packets to get flushed out of the
+ * Rx FIFO before we drop the netdev for the ring.
+ */
+ usleep_range(10000, 20000);
+
+ for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[rxbase + i];
+ struct ixgbe_q_vector *qv = ring->q_vector;
+
+ /* Make sure we aren't processing any packets and clear
+ * netdev to shut down the ring.
+ */
+ if (netif_running(adapter->netdev))
+ napi_synchronize(&qv->napi);
+ ring->netdev = NULL;
+ }
+
+ clear_bit(accel->pool, adapter->fwd_bitmask);
limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools);
adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
- ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
/* go back to full RSS if we're done with our VMQs */
if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
ixgbe_setup_tc(pdev, adapter->hw_tcs);
netdev_dbg(pdev, "pool %i:%i queues %i:%i\n",
- fwd_adapter->pool, adapter->num_rx_pools,
- fwd_adapter->rx_base_queue,
- fwd_adapter->rx_base_queue +
+ accel->pool, adapter->num_rx_pools,
+ accel->rx_base_queue,
+ accel->rx_base_queue +
adapter->num_rx_queues_per_pool);
- kfree(fwd_adapter);
+ kfree(accel);
}
#define IXGBE_MAX_MAC_HDR_LEN 127