git.baikalelectronics.ru Git - kernel.git/commitdiff
dpaa2-eth: Add congestion group taildrop
author: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Sat, 30 May 2020 21:08:11 +0000 (00:08 +0300)
committer: David S. Miller <davem@davemloft.net>
Mon, 1 Jun 2020 19:04:32 +0000 (12:04 -0700)
The increase in number of ingress frame queues means we now risk
depleting the buffer pool before the FQ taildrop kicks in.

Congestion group taildrop allows us to control the number of frames that
can accumulate on a group of Rx frame queues belonging to the same
traffic class.  This setting coexists with the frame queue based
taildrop: whichever limit gets hit first triggers the frame drop.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h

index c16c8ea3a174a7abdc72fbe3714790b30657caaa..04eff6308c72e303030b3cb407fd1ac583451960 100644 (file)
@@ -1287,17 +1287,20 @@ static void disable_ch_napi(struct dpaa2_eth_priv *priv)
        }
 }
 
-static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
+static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
+                                     bool tx_pause)
 {
        struct dpni_taildrop td = {0};
        struct dpaa2_eth_fq *fq;
        int i, err;
 
-       if (priv->rx_td_enabled == enable)
+       td.enable = !tx_pause;
+       if (priv->rx_td_enabled == td.enable)
                return;
 
-       td.enable = enable;
-       td.threshold = DPAA2_ETH_TAILDROP_THRESH;
+       /* FQ taildrop: threshold is in bytes, per frame queue */
+       td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
+       td.units = DPNI_CONGESTION_UNIT_BYTES;
 
        for (i = 0; i < priv->num_fqs; i++) {
                fq = &priv->fq[i];
@@ -1308,12 +1311,28 @@ static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
                                        fq->tc, fq->flowid, &td);
                if (err) {
                        netdev_err(priv->net_dev,
-                                  "dpni_set_taildrop() failed\n");
-                       break;
+                                  "dpni_set_taildrop(FQ) failed\n");
+                       return;
+               }
+       }
+
+       /* Congestion group taildrop: threshold is in frames, per group
+        * of FQs belonging to the same traffic class
+        */
+       td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
+       td.units = DPNI_CONGESTION_UNIT_FRAMES;
+       for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+               err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+                                       DPNI_CP_GROUP, DPNI_QUEUE_RX,
+                                       i, 0, &td);
+               if (err) {
+                       netdev_err(priv->net_dev,
+                                  "dpni_set_taildrop(CG) failed\n");
+                       return;
                }
        }
 
-       priv->rx_td_enabled = enable;
+       priv->rx_td_enabled = td.enable;
 }
 
 static int link_state_update(struct dpaa2_eth_priv *priv)
@@ -1334,7 +1353,7 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
         * only when pause frame generation is disabled.
         */
        tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
-       dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
+       dpaa2_eth_set_rx_taildrop(priv, tx_pause);
 
        /* When we manage the MAC/PHY using phylink there is no need
         * to manually update the netif_carrier.
index 6384f6a23349b485e8e0b68f62e9b7918ccaab6a..184d5d83e497d6f5ed94f2db852e02b6537562ec 100644 (file)
@@ -40,7 +40,7 @@
  * frames in the Rx queues (length of the current frame is not
  * taken into account when making the taildrop decision)
  */
-#define DPAA2_ETH_TAILDROP_THRESH      (64 * 1024)
+#define DPAA2_ETH_FQ_TAILDROP_THRESH   (64 * 1024)
 
 /* Maximum number of Tx confirmation frames to be processed
  * in a single NAPI call
  * how many 64B frames fit inside the taildrop threshold and add a margin
  * to accommodate the buffer refill delay.
  */
-#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
+#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_FQ_TAILDROP_THRESH / 64)
 #define DPAA2_ETH_NUM_BUFS             (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
 #define DPAA2_ETH_REFILL_THRESH \
        (DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)
 
+/* Congestion group taildrop threshold: number of frames allowed to accumulate
+ * at any moment in a group of Rx queues belonging to the same traffic class.
+ * Choose value such that we don't risk depleting the buffer pool before the
+ * taildrop kicks in
+ */
+#define DPAA2_ETH_CG_TAILDROP_THRESH(priv)                             \
+       (DPAA2_ETH_MAX_FRAMES_PER_QUEUE * dpaa2_eth_queue_count(priv) / \
+        dpaa2_eth_tc_count(priv))
+
 /* Maximum number of buffers that can be acquired/released through a single
  * QBMan command
  */