	return priv->netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
}
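+/* Enhanced multi-packet send WQEs (MPWQE) require the
+ * enhanced_multi_pkt_send_wqe device capability and are never used in
+ * kdump kernels, which run with minimum-sized queues.
+ */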
+static inline bool
+mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
+{
+	return !is_kdump_kernel() &&
+		MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe);
+}
+
int mlx5e_priv_init(struct mlx5e_priv *priv,
		    struct net_device *netdev,
		    struct mlx5_core_dev *mdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params new_params;

-	if (enable && !MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
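+	/* The pflag can be enabled only when both kernel and device support MPWQE */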
+	if (enable && !mlx5e_tx_mpwqe_supported(mdev))
		return -EOPNOTSUPP;

	new_params = priv->channels.params;
	params->log_sq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
-	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE,
-			MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
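+	/* Default SKB TX MPWQE to on only when usable (device cap, non-kdump) */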
+	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
	/* XDP SQ */
-	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE,
-			MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
+	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
	/* set CQE compression */
	params->rx_cqe_compress_def = false;