}
}
-static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
+static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
+ bool tx_pause)
{
struct dpni_taildrop td = {0};
struct dpaa2_eth_fq *fq;
int i, err;
- if (priv->rx_td_enabled == enable)
+ td.enable = !tx_pause;
+ if (priv->rx_td_enabled == td.enable)
return;
- td.enable = enable;
- td.threshold = DPAA2_ETH_TAILDROP_THRESH;
+ /* FQ taildrop: threshold is in bytes, per frame queue */
+ td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
+ td.units = DPNI_CONGESTION_UNIT_BYTES;
for (i = 0; i < priv->num_fqs; i++) {
fq = &priv->fq[i];
err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
DPNI_CP_QUEUE, DPNI_QUEUE_RX,
fq->tc, fq->flowid, &td);
if (err) {
netdev_err(priv->net_dev,
- "dpni_set_taildrop() failed\n");
- break;
+ "dpni_set_taildrop(FQ) failed\n");
+ return;
+ }
+ }
+
+ /* Congestion group taildrop: threshold is in frames, per group
+ * of FQs belonging to the same traffic class
+ */
+ td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
+ td.units = DPNI_CONGESTION_UNIT_FRAMES;
+ for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
+ err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
+ DPNI_CP_GROUP, DPNI_QUEUE_RX,
+ i, 0, &td);
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_set_taildrop(CG) failed\n");
+ return;
}
}
- priv->rx_td_enabled = enable;
+ priv->rx_td_enabled = td.enable;
}
static int link_state_update(struct dpaa2_eth_priv *priv)
* only when pause frame generation is disabled.
*/
tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
- dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
+ dpaa2_eth_set_rx_taildrop(priv, tx_pause);
/* When we manage the MAC/PHY using phylink there is no need
* to manually update the netif_carrier.
* frames in the Rx queues (length of the current frame is not
* taken into account when making the taildrop decision)
*/
-#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
+#define DPAA2_ETH_FQ_TAILDROP_THRESH (64 * 1024)
/* Maximum number of Tx confirmation frames to be processed
* in a single NAPI call
* how many 64B frames fit inside the taildrop threshold and add a margin
* to accommodate the buffer refill delay.
*/
-#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
+#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_FQ_TAILDROP_THRESH / 64)
#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
#define DPAA2_ETH_REFILL_THRESH \
(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)
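+ /* Example (illustrative arithmetic): with the 64 KiB FQ taildrop threshold,
+ * DPAA2_ETH_MAX_FRAMES_PER_QUEUE = 64 * 1024 / 64 = 1024 frames, so
+ * DPAA2_ETH_NUM_BUFS = 1024 + 256 = 1280 buffers per queue.
+ */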
+/* Congestion group taildrop threshold: number of frames allowed to accumulate
+ * at any moment in a group of Rx queues belonging to the same traffic class.
+ * Choose value such that we don't risk depleting the buffer pool before the
+ * taildrop kicks in
+ */
+#define DPAA2_ETH_CG_TAILDROP_THRESH(priv) \
+ (DPAA2_ETH_MAX_FRAMES_PER_QUEUE * dpaa2_eth_queue_count(priv) / \
+ dpaa2_eth_tc_count(priv))
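+ /* Example (illustrative): assuming 8 Rx queues and a single traffic class,
+ * this works out to 1024 * 8 / 1 = 8192 frames allowed to accumulate in one
+ * congestion group before taildrop kicks in.
+ */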
+
/* Maximum number of buffers that can be acquired/released through a single
* QBMan command
*/