net/mlx5e: Validate striding RQ before enabling XDP
author    Maxim Mikityanskiy <maximmi@nvidia.com>
          Tue, 27 Sep 2022 20:36:00 +0000 (13:36 -0700)
committer Jakub Kicinski <kuba@kernel.org>
          Thu, 29 Sep 2022 02:36:35 +0000 (19:36 -0700)
Currently, the driver can silently fall back to legacy RQ after enabling
XDP, even if striding RQ was active before. It happens when PAGE_SIZE is
bigger than the maximum supported stride size. This commit changes this
behavior to a more straightforward one: if an operation (enabling XDP) doesn't
support the current parameters (striding RQ mode), it fails.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en/params.h
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c

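The core of the change is a conversion from a bool "is striding RQ possible"
helper into int validators that return 0 or a negative errno, so callers can
report why striding RQ is unusable instead of silently falling back to legacy
RQ. Below is a minimal stand-alone sketch of that pattern; all names in it are
illustrative, not actual mlx5e identifiers.

/* Sketch of the bool -> errno validation pattern; all names here are
 * illustrative stand-ins, not mlx5e code.
 */
#include <errno.h>
#include <stdbool.h>

struct caps { bool striding_rq; };
struct params { bool linear_fits; };

/* Old style: a bool cannot say *why* validation failed. */
static bool feature_possible(const struct caps *c, const struct params *p)
{
	return c->striding_rq && p->linear_fits;
}

/* New style: 0 on success, a negative errno otherwise, so the caller can
 * propagate a precise error to userspace instead of silently degrading.
 */
static int feature_validate(const struct caps *c, const struct params *p)
{
	if (!c->striding_rq)
		return -EOPNOTSUPP;	/* the device lacks the capability */
	if (!p->linear_fits)
		return -EINVAL;		/* capable, but parameters don't fit */
	return 0;
}
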
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 8b54fec04fefd318b629be67dd3ced2927a475fe..2be09cc3c4378883fb0ce2d3574d9c02bcace28f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -320,22 +320,27 @@ bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
                link_speed > MLX5E_SLOW_PCI_RATIO * pci_bw;
 }
 
-bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
-                               struct mlx5e_params *params)
+int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
 {
        if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
-               return false;
+               return -EOPNOTSUPP;
 
-       if (params->xdp_prog) {
-               /* XSK params are not considered here. If striding RQ is in use,
-                * and an XSK is being opened, mlx5e_rx_mpwqe_is_linear_skb will
-                * be called with the known XSK params.
-                */
-               if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
-                       return false;
-       }
+       if (params->xdp_prog && !mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
+               return -EINVAL;
+
+       return 0;
+}
 
-       return true;
+int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
+                            struct mlx5e_xsk_param *xsk)
+{
+       if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
+               return -EOPNOTSUPP;
+
+       if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
+               return -EINVAL;
+
+       return 0;
 }
 
 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
@@ -356,8 +361,7 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
 
 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
 {
-       params->rq_wq_type = mlx5e_striding_rq_possible(mdev, params) &&
-               MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
+       params->rq_wq_type = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) ?
                MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
                MLX5_WQ_TYPE_CYCLIC;
 }
@@ -374,7 +378,7 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
         */
        if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
             MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
-           mlx5e_striding_rq_possible(mdev, params) &&
+           !mlx5e_mpwrq_validate_regular(mdev, params) &&
            (mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
             !mlx5e_rx_is_linear_skb(params, NULL)))
                MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
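
With the validators returning 0 on success, the hunks above show two call
styles: mlx5e_build_rq_params() negates the result to get a boolean, while
other callers propagate the errno. A stand-alone sketch of both styles; the
functions here are invented stand-ins, not driver code.

/* Two ways an errno-returning validator is consumed in this commit. */
#include <errno.h>

static int validate(int ok)
{
	return ok ? 0 : -EINVAL;
}

/* Boolean use, as in mlx5e_build_rq_params() above: true iff valid. */
static int may_enable(int ok)
{
	return !validate(ok);
}

/* Error propagation, as in set_pflag_rx_striding_rq() below: surface
 * -EINVAL or -EOPNOTSUPP to the user instead of swallowing it.
 */
static int try_enable(int ok)
{
	int err = validate(ok);

	if (err)
		return err;
	/* ... proceed with the reconfiguration ... */
	return 0;
}
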
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 3cc1c6b164441b634d08b0048b3b1bc60b0bf699..6e86cbfc7b58cb0dafe066f99ee018827ad84f0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -92,7 +92,9 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
 
 bool slow_pci_heuristic(struct mlx5_core_dev *mdev);
-bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
+int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
+                            struct mlx5e_xsk_param *xsk);
 void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 98ed9ef3a6bdd02277eabdccade2ba380ca09b4a..0b3c9f10b597db8833bb1abc2566b054753feac6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -30,7 +30,7 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
         */
        switch (params->rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-               return mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
+               return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk);
        default: /* MLX5_WQ_TYPE_CYCLIC */
                return mlx5e_rx_is_linear_skb(params, xsk);
        }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 29ed20abc3da9f0c667f3bcfcedf6864ceaa2960..8ae5cff3361e913b119ba7c7951c3eae83b1f3a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1997,10 +1997,14 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
        struct mlx5e_params new_params;
 
        if (enable) {
-               if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
-                       return -EOPNOTSUPP;
-               if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params))
-                       return -EINVAL;
+               /* Checking the regular RQ here; mlx5e_validate_xsk_param called
+                * from mlx5e_open_xsk will check for each XSK queue, and
+                * mlx5e_safe_switch_params will be reverted if any check fails.
+                */
+               int err = mlx5e_mpwrq_validate_regular(mdev, &priv->channels.params);
+
+               if (err)
+                       return err;
        } else if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
                netdev_warn(netdev, "Can't set legacy RQ with HW-GRO/LRO, disable them first\n");
                return -EINVAL;
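
The comment in the hunk above leans on mlx5e_safe_switch_params(): the new
channels are brought up first (which runs the per-queue checks, including the
XSK ones), and the old configuration stays in place if anything fails. A
simplified sketch of that try-then-commit shape; names, types, and structure
are invented stand-ins, not the driver's code.

/* Try-then-commit shape relied on by set_pflag_rx_striding_rq(). */
#include <errno.h>
#include <stddef.h>

struct channels { int nch; };
struct cfg { struct channels *ch; int params; };

static struct channels *open_channels(int params)
{
	static struct channels c = { .nch = 1 };

	return params >= 0 ? &c : NULL;	/* trivial stand-in for channel setup */
}

static void close_channels(struct channels *ch)
{
	(void)ch;			/* stand-in for channel teardown */
}

static int safe_switch(struct cfg *cur, int new_params)
{
	struct channels *fresh = open_channels(new_params);

	if (!fresh)
		return -EINVAL;		/* failure: old channels keep running */

	close_channels(cur->ch);	/* commit only once the new set is up */
	cur->ch = fresh;
	cur->params = new_params;
	return 0;
}
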
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 04af77092a297b70cc40d1459ee68f1c3fd393de..71df9891d7f8b923475653bdf9d3ac62167f1aca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4582,8 +4582,20 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 
        new_params = priv->channels.params;
        new_params.xdp_prog = prog;
-       if (reset)
-               mlx5e_set_rq_type(priv->mdev, &new_params);
+
+       /* XDP affects striding RQ parameters. Block XDP if striding RQ won't be
+        * supported with the new parameters: if PAGE_SIZE is bigger than
+        * MLX5_MPWQE_LOG_STRIDE_SZ_MAX, striding RQ can't be used, even though
+        * the MTU is small enough for the linear mode, because XDP uses strides
+        * of PAGE_SIZE on regular RQs.
+        */
+       if (reset && MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
+               /* Checking for regular RQs here; XSK RQs were checked on XSK bind. */
+               err = mlx5e_mpwrq_validate_regular(priv->mdev, &new_params);
+               if (err)
+                       goto unlock;
+       }
+
        old_prog = priv->channels.params.xdp_prog;
 
        err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
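
The constraint described in the comment above reduces to a page-size check:
with XDP, each packet occupies a full stride of PAGE_SIZE, so striding RQ is
only usable when log2(PAGE_SIZE) does not exceed the device's maximum log
stride size. A simplified sketch, using example constants in place of
PAGE_SHIFT and the MLX5_MPWQE_LOG_STRIDE_SZ_MAX mentioned in the comment (the
helper and the limit value are illustrative, not taken from the driver):

/* Invented helper modelling the check the comment describes: on systems
 * with large pages (e.g. 64K), log2(PAGE_SIZE) can exceed the device's
 * maximum log stride size, making striding RQ unusable under XDP.
 */
#include <stdbool.h>

#define EXAMPLE_PAGE_SHIFT		12	/* 4K pages; 16 on 64K-page systems */
#define EXAMPLE_LOG_STRIDE_SZ_MAX	13	/* illustrative device limit */

static bool xdp_stride_fits(void)
{
	return EXAMPLE_PAGE_SHIFT <= EXAMPLE_LOG_STRIDE_SZ_MAX;
}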