net/mlx5e: Don't cache tunnel offloads capability
author    Parav Pandit <parav@nvidia.com>
          Fri, 12 Mar 2021 13:21:29 +0000 (07:21 -0600)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 22 Mar 2023 12:33:48 +0000 (13:33 +0100)
[ Upstream commit ef1d1cb6e5539398f92495d00c9f3b30c5438386 ]

When mlx5e attaches again after device health recovery, the device
capabilities might have been changed by the eswitch manager.

For example, in one flow, when the ECPF changes the eswitch mode between
legacy and switchdev, it updates the flow table tunnel capability.

The cached value is used in only one place, so just check the capability
there instead.
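
To make the hazard concrete, here is a minimal self-contained C sketch of
the pattern being removed; struct fake_dev, tunnel_ft_supported() and
everything else in it are illustrative stand-ins, not mlx5 code. A
capability bit snapshotted at init time goes stale once the eswitch
manager rewrites it, while a query at the point of use always sees the
current value:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for a device whose capability bits can be rewritten
	 * underneath the driver, e.g. by an eswitch mode change. */
	struct fake_dev {
		bool inner_ft_cap;
	};

	/* Stand-in for a live capability query such as
	 * mlx5_tunnel_inner_ft_supported(). */
	static bool tunnel_ft_supported(const struct fake_dev *dev)
	{
		return dev->inner_ft_cap;
	}

	int main(void)
	{
		struct fake_dev dev = { .inner_ft_cap = true };

		/* Removed pattern: snapshot the capability once at attach. */
		bool cached = tunnel_ft_supported(&dev);

		/* The eswitch manager flips the mode; the capability changes. */
		dev.inner_ft_cap = false;

		/* cached is now stale (1); the live query is correct (0). */
		printf("cached=%d live=%d\n", cached, tunnel_ft_supported(&dev));
		return 0;
	}

This is why the patch below drops tunneled_offload_en from struct
mlx5e_params and has the one consumer, mlx5e_init_nic_rx(), call
mlx5_tunnel_inner_ft_supported() directly.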

Fixes: d40a2f36915a ("net/mlx5: Enable host PF HCA after eswitch is initialized")
Signed-off-by: Parav Pandit <parav@nvidia.com>
Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 26a23047f1f3b5e722517fec703e2a9f82278787..bc76fe6b06230df029d8ccc7348c463380fff7ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -313,7 +313,6 @@ struct mlx5e_params {
                } channel;
        } mqprio;
        bool rx_cqe_compress_def;
-       bool tunneled_offload_en;
        struct dim_cq_moder rx_cq_moderation;
        struct dim_cq_moder tx_cq_moderation;
        struct mlx5e_packet_merge_param packet_merge;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 142ed2d98cd5d77a89ef7f64e0a32b33e2d6a67b..609a49c1e09e6fb094d257d9bb8cc51a98f6ef61 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4911,8 +4911,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
        /* TX inline */
        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
 
-       params->tunneled_offload_en = mlx5_tunnel_inner_ft_supported(mdev);
-
        /* AF_XDP */
        params->xsk = xsk;
 
@@ -5216,7 +5214,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
        }
 
        features = MLX5E_RX_RES_FEATURE_PTP;
-       if (priv->channels.params.tunneled_offload_en)
+       if (mlx5_tunnel_inner_ft_supported(mdev))
                features |= MLX5E_RX_RES_FEATURE_INNER_FT;
        err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
                                priv->max_nch, priv->drop_rq.rqn,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 794cd8dfe9c911f7c6480c877cb81bc6beac96fa..0f744131c6869681d707ad4655c92b6fe7703bc4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -707,7 +707,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
        mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
 
        params->mqprio.num_tc       = 1;
-       params->tunneled_offload_en = false;
        if (rep->vport != MLX5_VPORT_UPLINK)
                params->vlan_strip_disable = true;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 038ae0fcf9d458a908ee339eeb433510cff7e812..aed4e896179a36cfa4aaae695f9ac2f8ef0e3769 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -70,7 +70,6 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
 
        params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
        params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
-       params->tunneled_offload_en = false;
 
        /* CQE compression is not supported for IPoIB */
        params->rx_cqe_compress_def = false;