net/mlx5e: Add missing sanity checks for max TX WQE size
author     Maxim Mikityanskiy <maximmi@nvidia.com>
           Thu, 3 Nov 2022 06:55:42 +0000 (23:55 -0700)
committer  Saeed Mahameed <saeedm@nvidia.com>
           Wed, 9 Nov 2022 18:30:43 +0000 (10:30 -0800)
The commit cited below started using the firmware capability for the
maximum TX WQE size. This commit adds an important check to verify that
the driver doesn't attempt to exceed this capability, and also restores
another check mistakenly removed in the cited commit (a WQE must not
exceed the page size).

Fixes: c27bd1718c06 ("net/mlx5e: Read max WQEBBs on the SQ from firmware")
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
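
The page-size rule restored here works together with the driver's stop-room
logic: if a WQE of X WQEBBs would cross a page boundary, the ring is padded
with NOPs first, so posting one WQE may consume up to X - 1 extra WQEBBs.
A minimal sketch of that reservation, assuming MLX5E_STOP_ROOM() still
follows the 2*X - 1 formula described in the comment inside
mlx5e_stop_room_for_wqe() below (illustration only, not part of the patch):

/* Worst-case ring space one WQE of X WQEBBs needs: up to X - 1 NOP
 * WQEBBs of padding before it, plus the WQE itself:
 * (X - 1) + X = 2 * X - 1.
 */
static unsigned int stop_room_for(unsigned int wqebbs)
{
	return 2 * wqebbs - 1;
}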
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index cb164b62f54365664ac52d541a5cb6f19fd0d0ff..853f312cd757257a84f297ce1ef1a91dc5c7d4c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
 
 #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
 
+/* IPSEC inline data includes:
+ * 1. ESP trailer: up to 255 bytes of padding, 1 byte for pad length, 1 byte for
+ *    next header.
+ * 2. ESP authentication data: 16 bytes for ICV.
+ */
+#define MLX5E_MAX_TX_IPSEC_DS DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + \
+                                          255 + 1 + 1 + 16, MLX5_SEND_WQE_DS)
+
+/* 366 should be big enough to cover all L2, L3 and L4 headers with possible
+ * encapsulations.
+ */
+#define MLX5E_MAX_TX_INLINE_DS DIV_ROUND_UP(366 - INL_HDR_START_SZ + VLAN_HLEN, \
+                                           MLX5_SEND_WQE_DS)
+
+/* Sync the calculation with mlx5e_sq_calc_wqe_attr. */
+#define MLX5E_MAX_TX_WQEBBS DIV_ROUND_UP(MLX5E_TX_WQE_EMPTY_DS_COUNT + \
+                                        MLX5E_MAX_TX_INLINE_DS + \
+                                        MLX5E_MAX_TX_IPSEC_DS + \
+                                        MAX_SKB_FRAGS + 1, \
+                                        MLX5_SEND_WQEBB_NUM_DS)
+
 #define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
 
 static inline
@@ -424,6 +445,8 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
 
 static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
 {
+       WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < mlx5e_get_max_sq_wqebbs(mdev));
+
        /* A WQE must not cross the page boundary, hence two conditions:
         * 1. Its size must not exceed the page size.
         * 2. If the WQE size is X, and the space remaining in a page is less
@@ -436,7 +459,6 @@ static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_si
                  "wqe_size %u is greater than max SQ WQEBBs %u",
                  wqe_size, mlx5e_get_max_sq_wqebbs(mdev));
 
-
        return MLX5E_STOP_ROOM(wqe_size);
 }
 
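Plugging typical values into the new macros shows the headroom this bound
leaves (a back-of-the-envelope check, not part of the patch; the constants
MLX5_SEND_WQE_DS = 16, MLX5_SEND_WQEBB_NUM_DS = 4,
MLX5E_TX_WQE_EMPTY_DS_COUNT = 2, sizeof(struct mlx5_wqe_inline_seg) = 4,
INL_HDR_START_SZ = 2, VLAN_HLEN = 4 and MAX_SKB_FRAGS = 17 are assumed from
a common x86-64 configuration with 4096-byte pages):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int ipsec_ds  = DIV_ROUND_UP(4 + 255 + 1 + 1 + 16, 16); /* 18 */
	int inline_ds = DIV_ROUND_UP(366 - 2 + 4, 16);          /* 23 */
	int wqebbs    = DIV_ROUND_UP(2 + inline_ds + ipsec_ds + 17 + 1, 4);

	/* Prints 16: a worst-case WQE is 16 * 64 = 1024 bytes, well under
	 * the 64 WQEBBs that fit in a 4096-byte page, which is what the
	 * new WARN_ON_ONCE() in mlx5e_stop_room_for_wqe() guards. */
	printf("MLX5E_MAX_TX_WQEBBS = %d\n", wqebbs);
	return 0;
}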
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 364f04309149225b3a8795d315cd619fb838b62b..e3a4f01bcceb196283bbe0bcce05022d320374d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5694,6 +5694,13 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
                mlx5e_fs_set_state_destroy(priv->fs,
                                           !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
 
+       /* Validate the max_wqe_size_sq capability. */
+       if (WARN_ON_ONCE(mlx5e_get_max_sq_wqebbs(priv->mdev) < MLX5E_MAX_TX_WQEBBS)) {
+               mlx5_core_warn(priv->mdev, "MLX5E: Max SQ WQEBBs firmware capability: %u, needed %lu\n",
+                              mlx5e_get_max_sq_wqebbs(priv->mdev), MLX5E_MAX_TX_WQEBBS);
+               return -EIO;
+       }
+
        /* max number of channels may have changed */
        max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
        if (priv->channels.params.num_channels > max_nch) {
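
For reference, the capability validated above is reported by
mlx5e_get_max_sq_wqebbs(), the helper introduced by the cited commit.
Roughly reconstructed from that commit (a sketch, not verbatim from the
tree), it converts the firmware's max_wqe_sz_sq byte limit into WQEBBs:

static inline u8 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
{
	/* Firmware reports the max SQ WQE size in bytes; convert to
	 * 64-byte WQEBBs and clamp to the driver-wide maximum. */
	return min_t(u8, MLX5_SEND_WQE_MAX_WQEBBS,
		     MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
}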
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 6adca01fbdc9dda097591722244e5f1555026351..f7897ddb29c52667c52eafcee83314a765ad1f6e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -305,6 +305,8 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
        u16 ds_cnt_inl = 0;
        u16 ds_cnt_ids = 0;
 
+       /* Sync the calculation with MLX5E_MAX_TX_WQEBBS. */
+
        if (attr->insz)
                ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
                                          MLX5_SEND_WQE_DS);
@@ -317,6 +319,9 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at
                        inl += VLAN_HLEN;
 
                ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
+               if (WARN_ON_ONCE(ds_cnt_inl > MLX5E_MAX_TX_INLINE_DS))
+                       netdev_warn(skb->dev, "ds_cnt_inl = %u > max %u\n", ds_cnt_inl,
+                                   (u16)MLX5E_MAX_TX_INLINE_DS);
                ds_cnt += ds_cnt_inl;
        }
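
The "Sync the calculation" comments tie this runtime computation to the
compile-time bound: the DS count assembled here is later rounded up into
WQEBBs, and MLX5E_MAX_TX_WQEBBS applies the same rounding to the worst-case
inputs. A sketch of that final conversion (the helper name is hypothetical;
it mirrors how mlx5e_sq_calc_wqe_attr() fills the num_wqebbs field of its
output, assuming MLX5_SEND_WQEBB_NUM_DS = 4):

static u16 tx_wqebbs_for(u16 ds_cnt)
{
	/* Each 64-byte WQEBB holds MLX5_SEND_WQEBB_NUM_DS 16-byte data
	 * segments, so round the DS count up to whole WQEBBs; for the
	 * worst-case ds_cnt this must not exceed MLX5E_MAX_TX_WQEBBS. */
	return DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
}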