sfc/siena: Rename RX/TX functions to avoid conflicts with sfc
author Martin Habets <habetsm.xilinx@gmail.com>
Mon, 9 May 2022 15:32:20 +0000 (16:32 +0100)
committer Jakub Kicinski <kuba@kernel.org>
Tue, 10 May 2022 22:38:14 +0000 (15:38 -0700)
For Siena, use efx_siena_ as the function prefix.
Several functions are not used in Siena, so they are removed.

Use a Siena-specific variable name for the module parameter
efx_separate_tx_channels.
Move efx_fini_tx_queue() to avoid a forward declaration of
efx_dequeue_buffer().

Signed-off-by: Martin Habets <habetsm.xilinx@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
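
The module parameter change relies on module_param_named(), which decouples the user-visible parameter name from the C variable that backs it: the variable can take the efx_siena_ prefix while userspace still sets efx_separate_tx_channels exactly as before. A minimal sketch of the idiom, using hypothetical example_* names rather than the driver's actual symbols:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* Hypothetical module: the backing variable carries a driver-local prefix... */
    static bool example_separate_tx_channels;

    /* ...but the parameter keeps its original user-visible name, so existing
     * "modprobe example separate_tx_channels=1" invocations and the
     * /sys/module/example/parameters/separate_tx_channels path are unchanged.
     */
    module_param_named(separate_tx_channels, example_separate_tx_channels,
                       bool, 0444);
    MODULE_PARM_DESC(separate_tx_channels,
                     "Use separate channels for TX and RX");
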
13 files changed:
drivers/net/ethernet/sfc/siena/efx.c
drivers/net/ethernet/sfc/siena/efx.h
drivers/net/ethernet/sfc/siena/efx_channels.c
drivers/net/ethernet/sfc/siena/efx_common.c
drivers/net/ethernet/sfc/siena/ethtool_common.c
drivers/net/ethernet/sfc/siena/farch.c
drivers/net/ethernet/sfc/siena/rx.c
drivers/net/ethernet/sfc/siena/rx_common.c
drivers/net/ethernet/sfc/siena/rx_common.h
drivers/net/ethernet/sfc/siena/tx.c
drivers/net/ethernet/sfc/siena/tx.h
drivers/net/ethernet/sfc/siena/tx_common.c
drivers/net/ethernet/sfc/siena/tx_common.h

drivers/net/ethernet/sfc/siena/efx.c
index ca41c038f3ab78cfebdfa663233254b9a46f970d..d94e2438ae3ae6f5569bc0a86675f04b03f475fe 100644
@@ -58,8 +58,9 @@ MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
  *
  * This is only used in MSI-X interrupt mode
  */
-bool efx_separate_tx_channels;
-module_param(efx_separate_tx_channels, bool, 0444);
+bool efx_siena_separate_tx_channels;
+module_param_named(efx_separate_tx_channels, efx_siena_separate_tx_channels,
+                  bool, 0444);
 MODULE_PARM_DESC(efx_separate_tx_channels,
                 "Use separate channels for TX and RX");
 
@@ -306,7 +307,7 @@ static int efx_probe_nic(struct efx_nic *efx)
        if (efx->n_channels > 1)
                netdev_rss_key_fill(efx->rss_context.rx_hash_key,
                                    sizeof(efx->rss_context.rx_hash_key));
-       efx_set_default_rx_indir_table(efx, &efx->rss_context);
+       efx_siena_set_default_rx_indir_table(efx, &efx->rss_context);
 
        /* Initialise the interrupt moderation settings */
        efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
@@ -366,7 +367,7 @@ static int efx_probe_all(struct efx_nic *efx)
                           " VFs may not function\n", rc);
 #endif
 
-       rc = efx_probe_filters(efx);
+       rc = efx_siena_probe_filters(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to create filter tables\n");
@@ -380,7 +381,7 @@ static int efx_probe_all(struct efx_nic *efx)
        return 0;
 
  fail5:
-       efx_remove_filters(efx);
+       efx_siena_remove_filters(efx);
  fail4:
 #ifdef CONFIG_SFC_SRIOV
        efx->type->vswitching_remove(efx);
@@ -400,7 +401,7 @@ static void efx_remove_all(struct efx_nic *efx)
        rtnl_unlock();
 
        efx_siena_remove_channels(efx);
-       efx_remove_filters(efx);
+       efx_siena_remove_filters(efx);
 #ifdef CONFIG_SFC_SRIOV
        efx->type->vswitching_remove(efx);
 #endif
@@ -602,7 +603,7 @@ static const struct net_device_ops efx_netdev_ops = {
        .ndo_get_phys_port_name = efx_siena_get_phys_port_name,
        .ndo_setup_tc           = efx_siena_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
-       .ndo_rx_flow_steer      = efx_filter_rfs,
+       .ndo_rx_flow_steer      = efx_siena_filter_rfs,
 #endif
        .ndo_xdp_xmit           = efx_xdp_xmit,
        .ndo_bpf                = efx_xdp
drivers/net/ethernet/sfc/siena/efx.h
index a4f9e6e962b0f802be1239d82ca624dca996956b..f91f3c94a2750b440baf017f21d75df91e292bba 100644
@@ -44,7 +44,7 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel)
  * TSO skbs.
  */
 #define EFX_RXQ_MIN_ENT                128U
-#define EFX_TXQ_MIN_ENT(efx)   (2 * efx_tx_max_skb_descs(efx))
+#define EFX_TXQ_MIN_ENT(efx)   (2 * efx_siena_tx_max_skb_descs(efx))
 
 /* All EF10 architecture NICs steal one bit of the DMAQ size for various
  * other purposes when counting TxQ entries, so we halve the queue size.
@@ -78,7 +78,7 @@ static inline bool efx_rss_enabled(struct efx_nic *efx)
  *
  * 2. If the existing filters have higher priority, return -%EPERM.
  *
- * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
+ * 3. If !efx_siena_filter_is_mc_recipient(@spec), or the NIC does not
  *    support delivery to multiple recipients, return -%EEXIST.
  *
  * This implies that filters for multiple multicast recipients must
drivers/net/ethernet/sfc/siena/efx_channels.c
index b04affb23f72b2f2fc5b2726ad7622b24920ea01..799c0a90358ce6e161a9b13a1d0039721fae3c59 100644
@@ -138,7 +138,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
        int n_xdp_tx;
        int n_xdp_ev;
 
-       if (efx_separate_tx_channels)
+       if (efx_siena_separate_tx_channels)
                n_channels *= 2;
        n_channels += extra_channels;
 
@@ -220,7 +220,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
        /* Ignore XDP tx channels when creating rx channels. */
        n_channels -= efx->n_xdp_channels;
 
-       if (efx_separate_tx_channels) {
+       if (efx_siena_separate_tx_channels) {
                efx->n_tx_channels =
                        min(max(n_channels / 2, 1U),
                            efx->max_tx_channels);
@@ -321,7 +321,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
 
        /* Assume legacy interrupts */
        if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
-               efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
+               efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
                efx->n_rx_channels = 1;
                efx->n_tx_channels = 1;
                efx->n_xdp_channels = 0;
@@ -521,7 +521,8 @@ static void efx_filter_rfs_expire(struct work_struct *data)
        channel = container_of(dwork, struct efx_channel, filter_work);
        time = jiffies - channel->rfs_last_expiry;
        quota = channel->rfs_filter_count * time / (30 * HZ);
-       if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
+       if (quota >= 20 && __efx_siena_filter_rfs_expire(channel,
+                                       min(channel->rfs_filter_count, quota)))
                channel->rfs_last_expiry += time;
        /* Ensure we do more work eventually even if NAPI poll is not happening */
        schedule_delayed_work(dwork, 30 * HZ);
@@ -558,7 +559,7 @@ static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
 
        rx_queue = &channel->rx_queue;
        rx_queue->efx = efx;
-       timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
+       timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);
 
        return channel;
 }
@@ -631,7 +632,7 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
        rx_queue = &channel->rx_queue;
        rx_queue->buffer = NULL;
        memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
-       timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
+       timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);
 #ifdef CONFIG_RFS_ACCEL
        INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
 #endif
@@ -657,13 +658,13 @@ static int efx_probe_channel(struct efx_channel *channel)
                goto fail;
 
        efx_for_each_channel_tx_queue(tx_queue, channel) {
-               rc = efx_probe_tx_queue(tx_queue);
+               rc = efx_siena_probe_tx_queue(tx_queue);
                if (rc)
                        goto fail;
        }
 
        efx_for_each_channel_rx_queue(rx_queue, channel) {
-               rc = efx_probe_rx_queue(rx_queue);
+               rc = efx_siena_probe_rx_queue(rx_queue);
                if (rc)
                        goto fail;
        }
@@ -751,9 +752,9 @@ void efx_siena_remove_channel(struct efx_channel *channel)
                  "destroy chan %d\n", channel->channel);
 
        efx_for_each_channel_rx_queue(rx_queue, channel)
-               efx_remove_rx_queue(rx_queue);
+               efx_siena_remove_rx_queue(rx_queue);
        efx_for_each_channel_tx_queue(tx_queue, channel)
-               efx_remove_tx_queue(tx_queue);
+               efx_siena_remove_tx_queue(tx_queue);
        efx_remove_eventq(channel);
        channel->type->post_remove(channel);
 }
@@ -963,7 +964,7 @@ int efx_siena_set_channels(struct efx_nic *efx)
        int rc;
 
        efx->tx_channel_offset =
-               efx_separate_tx_channels ?
+               efx_siena_separate_tx_channels ?
                efx->n_channels - efx->n_tx_channels : 0;
 
        if (efx->xdp_tx_queue_count) {
@@ -1130,15 +1131,15 @@ void efx_siena_start_channels(struct efx_nic *efx)
 
        efx_for_each_channel_rev(channel, efx) {
                efx_for_each_channel_tx_queue(tx_queue, channel) {
-                       efx_init_tx_queue(tx_queue);
+                       efx_siena_init_tx_queue(tx_queue);
                        atomic_inc(&efx->active_queues);
                }
 
                efx_for_each_channel_rx_queue(rx_queue, channel) {
-                       efx_init_rx_queue(rx_queue);
+                       efx_siena_init_rx_queue(rx_queue);
                        atomic_inc(&efx->active_queues);
                        efx_siena_stop_eventq(channel);
-                       efx_fast_push_rx_descriptors(rx_queue, false);
+                       efx_siena_fast_push_rx_descriptors(rx_queue, false);
                        efx_siena_start_eventq(channel);
                }
 
@@ -1184,9 +1185,9 @@ void efx_siena_stop_channels(struct efx_nic *efx)
 
        efx_for_each_channel(channel, efx) {
                efx_for_each_channel_rx_queue(rx_queue, channel)
-                       efx_fini_rx_queue(rx_queue);
+                       efx_siena_fini_rx_queue(rx_queue);
                efx_for_each_channel_tx_queue(tx_queue, channel)
-                       efx_fini_tx_queue(tx_queue);
+                       efx_siena_fini_tx_queue(tx_queue);
        }
 }
 
@@ -1228,7 +1229,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
                        efx_channel_get_rx_queue(channel);
 
                efx_rx_flush_packet(channel);
-               efx_fast_push_rx_descriptors(rx_queue, true);
+               efx_siena_fast_push_rx_descriptors(rx_queue, true);
        }
 
        /* Update BQL */
drivers/net/ethernet/sfc/siena/efx_common.c
index fb6fb345cc566373e458f05eab3ce2ea5e9c12ba..f245d03c4caac6f8feca5f4617374a4ca343d3e2 100644
@@ -395,7 +395,7 @@ static void efx_start_datapath(struct efx_nic *efx)
                efx->rx_buffer_order = get_order(rx_buf_len);
        }
 
-       efx_rx_config_page_split(efx);
+       efx_siena_rx_config_page_split(efx);
        if (efx->rx_buffer_order)
                netif_dbg(efx, drv, efx->net_dev,
                          "RX buf len=%u; page order=%u batch=%u\n",
@@ -428,7 +428,7 @@ static void efx_start_datapath(struct efx_nic *efx)
         * the ring completely.  We wake it when half way back to
         * empty.
         */
-       efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
+       efx->txq_stop_thresh = efx->txq_entries - efx_siena_tx_max_skb_descs(efx);
        efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
 
        /* Initialise the channels */
drivers/net/ethernet/sfc/siena/ethtool_common.c
index e177b58e0664ec058d26199887325c4cc6048c4f..f54510cf4e728440c0e85985ce2ccf4b601b0e1b 100644
@@ -824,7 +824,8 @@ int efx_ethtool_get_rxnfc(struct net_device *net_dev,
 
                mutex_lock(&efx->rss_lock);
                if (info->flow_type & FLOW_RSS && info->rss_context) {
-                       ctx = efx_find_rss_context_entry(efx, info->rss_context);
+                       ctx = efx_siena_find_rss_context_entry(efx,
+                                                       info->rss_context);
                        if (!ctx) {
                                rc = -ENOENT;
                                goto out_unlock;
@@ -1213,7 +1214,7 @@ int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
                return -EOPNOTSUPP;
 
        mutex_lock(&efx->rss_lock);
-       ctx = efx_find_rss_context_entry(efx, rss_context);
+       ctx = efx_siena_find_rss_context_entry(efx, rss_context);
        if (!ctx) {
                rc = -ENOENT;
                goto out_unlock;
@@ -1257,18 +1258,18 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
                        rc = -EINVAL;
                        goto out_unlock;
                }
-               ctx = efx_alloc_rss_context_entry(efx);
+               ctx = efx_siena_alloc_rss_context_entry(efx);
                if (!ctx) {
                        rc = -ENOMEM;
                        goto out_unlock;
                }
                ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
                /* Initialise indir table and key to defaults */
-               efx_set_default_rx_indir_table(efx, ctx);
+               efx_siena_set_default_rx_indir_table(efx, ctx);
                netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
                allocated = true;
        } else {
-               ctx = efx_find_rss_context_entry(efx, *rss_context);
+               ctx = efx_siena_find_rss_context_entry(efx, *rss_context);
                if (!ctx) {
                        rc = -ENOENT;
                        goto out_unlock;
@@ -1279,7 +1280,7 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
                /* delete this context */
                rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
                if (!rc)
-                       efx_free_rss_context_entry(ctx);
+                       efx_siena_free_rss_context_entry(ctx);
                goto out_unlock;
        }
 
@@ -1290,7 +1291,7 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
 
        rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key);
        if (rc && allocated)
-               efx_free_rss_context_entry(ctx);
+               efx_siena_free_rss_context_entry(ctx);
        else
                *rss_context = ctx->user_id;
 out_unlock:
drivers/net/ethernet/sfc/siena/farch.c
index 6ee6ca192a44726fe861ce2ccd4593f095e67a52..4de36c6c28e1177899fc8c21cbb4f601319be539 100644
@@ -1160,7 +1160,7 @@ static void efx_farch_handle_generated_event(struct efx_channel *channel,
                /* The queue must be empty, so we won't receive any rx
                 * events, so efx_process_channel() won't refill the
                 * queue. Refill it here */
-               efx_fast_push_rx_descriptors(rx_queue, true);
+               efx_siena_fast_push_rx_descriptors(rx_queue, true);
        } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
                efx_farch_handle_drain_event(channel);
        } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
@@ -2925,13 +2925,14 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
                         */
                        arfs_id = 0;
                } else {
-                       rule = efx_rps_hash_find(efx, &spec);
+                       rule = efx_siena_rps_hash_find(efx, &spec);
                        if (!rule) {
                                /* ARFS table doesn't know of this filter, remove it */
                                force = true;
                        } else {
                                arfs_id = rule->arfs_id;
-                               if (!efx_rps_check_rule(rule, index, &force))
+                               if (!efx_siena_rps_check_rule(rule, index,
+                                                             &force))
                                        goto out_unlock;
                        }
                }
@@ -2939,7 +2940,7 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
                                                 flow_id, arfs_id)) {
                        if (rule)
                                rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
-                       efx_rps_hash_del(efx, &spec);
+                       efx_siena_rps_hash_del(efx, &spec);
                        efx_farch_filter_table_clear_entry(efx, table, index);
                        ret = true;
                }
drivers/net/ethernet/sfc/siena/rx.c
index 099cb23e32503705ec34bf5f7f09501cca95ac10..47c09b93f7c4b8688c8f795c97c1c5521c9a981e 100644
@@ -157,7 +157,7 @@ void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
         */
        if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
                efx_rx_flush_packet(channel);
-               efx_discard_rx_packet(channel, rx_buf, n_frags);
+               efx_siena_discard_rx_packet(channel, rx_buf, n_frags);
                return;
        }
 
@@ -195,7 +195,7 @@ void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 
        /* All fragments have been DMA-synced, so recycle pages. */
        rx_buf = efx_rx_buffer(rx_queue, index);
-       efx_recycle_rx_pages(channel, rx_buf, n_frags);
+       efx_siena_recycle_rx_pages(channel, rx_buf, n_frags);
 
        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
@@ -217,7 +217,7 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
                struct efx_rx_queue *rx_queue;
 
                rx_queue = efx_channel_get_rx_queue(channel);
-               efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+               efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
                return;
        }
        skb_record_rx_queue(skb, channel->rx_queue.core_index);
@@ -268,8 +268,8 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
 
        if (unlikely(channel->rx_pkt_n_frags > 1)) {
                /* We can't do XDP on fragmented packets - drop. */
-               efx_free_rx_buffers(rx_queue, rx_buf,
-                                   channel->rx_pkt_n_frags);
+               efx_siena_free_rx_buffers(rx_queue, rx_buf,
+                                         channel->rx_pkt_n_frags);
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  "XDP is not possible with multiple receive fragments (%d)\n",
@@ -312,7 +312,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
                xdpf = xdp_convert_buff_to_frame(&xdp);
                err = efx_siena_xdp_tx_buffers(efx, 1, &xdpf, true);
                if (unlikely(err != 1)) {
-                       efx_free_rx_buffers(rx_queue, rx_buf, 1);
+                       efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
                        if (net_ratelimit())
                                netif_err(efx, rx_err, efx->net_dev,
                                          "XDP TX failed (%d)\n", err);
@@ -326,7 +326,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
        case XDP_REDIRECT:
                err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
                if (unlikely(err)) {
-                       efx_free_rx_buffers(rx_queue, rx_buf, 1);
+                       efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
                        if (net_ratelimit())
                                netif_err(efx, rx_err, efx->net_dev,
                                          "XDP redirect failed (%d)\n", err);
@@ -339,7 +339,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
 
        default:
                bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act);
-               efx_free_rx_buffers(rx_queue, rx_buf, 1);
+               efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
                channel->n_rx_xdp_bad_drops++;
                trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
                break;
@@ -348,7 +348,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
                trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
                fallthrough;
        case XDP_DROP:
-               efx_free_rx_buffers(rx_queue, rx_buf, 1);
+               efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
                channel->n_rx_xdp_drops++;
                break;
        }
@@ -379,8 +379,8 @@ void __efx_siena_rx_packet(struct efx_channel *channel)
 
                efx_loopback_rx_packet(efx, eh, rx_buf->len);
                rx_queue = efx_channel_get_rx_queue(channel);
-               efx_free_rx_buffers(rx_queue, rx_buf,
-                                   channel->rx_pkt_n_frags);
+               efx_siena_free_rx_buffers(rx_queue, rx_buf,
+                                         channel->rx_pkt_n_frags);
                goto out;
        }
 
drivers/net/ethernet/sfc/siena/rx_common.c
index 9fb442da043cca5b5c43ace22519348336cc78e8..4579f43484c3675963de1ad8c7753a200c680fe1 100644
@@ -30,6 +30,9 @@ MODULE_PARM_DESC(rx_refill_threshold,
  */
 #define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
 
+static void efx_unmap_rx_buffer(struct efx_nic *efx,
+                               struct efx_rx_buffer *rx_buf);
+
 /* Check the RX page recycle ring for a page that can be reused. */
 static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
 {
@@ -103,9 +106,9 @@ static void efx_recycle_rx_page(struct efx_channel *channel,
 }
 
 /* Recycle the pages that are used by buffers that have just been received. */
-void efx_recycle_rx_pages(struct efx_channel *channel,
-                         struct efx_rx_buffer *rx_buf,
-                         unsigned int n_frags)
+void efx_siena_recycle_rx_pages(struct efx_channel *channel,
+                               struct efx_rx_buffer *rx_buf,
+                               unsigned int n_frags)
 {
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
 
@@ -118,15 +121,15 @@ void efx_recycle_rx_pages(struct efx_channel *channel,
        } while (--n_frags);
 }
 
-void efx_discard_rx_packet(struct efx_channel *channel,
-                          struct efx_rx_buffer *rx_buf,
-                          unsigned int n_frags)
+void efx_siena_discard_rx_packet(struct efx_channel *channel,
+                                struct efx_rx_buffer *rx_buf,
+                                unsigned int n_frags)
 {
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
 
-       efx_recycle_rx_pages(channel, rx_buf, n_frags);
+       efx_siena_recycle_rx_pages(channel, rx_buf, n_frags);
 
-       efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+       efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
 }
 
 static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
@@ -181,12 +184,12 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
        /* If this is the last buffer in a page, unmap and free it. */
        if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
                efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
-               efx_free_rx_buffers(rx_queue, rx_buf, 1);
+               efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
        }
        rx_buf->page = NULL;
 }
 
-int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
+int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue)
 {
        struct efx_nic *efx = rx_queue->efx;
        unsigned int entries;
@@ -217,7 +220,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
        return rc;
 }
 
-void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
+void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
        unsigned int max_fill, trigger, max_trigger;
        struct efx_nic *efx = rx_queue->efx;
@@ -272,7 +275,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
        efx_nic_init_rx(rx_queue);
 }
 
-void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
+void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue)
 {
        struct efx_rx_buffer *rx_buf;
        int i;
@@ -301,7 +304,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
        rx_queue->xdp_rxq_info_valid = false;
 }
 
-void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
+void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue)
 {
        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
@@ -315,8 +318,8 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
 /* Unmap a DMA-mapped page.  This function is only called for the final RX
  * buffer in a page.
  */
-void efx_unmap_rx_buffer(struct efx_nic *efx,
-                        struct efx_rx_buffer *rx_buf)
+static void efx_unmap_rx_buffer(struct efx_nic *efx,
+                               struct efx_rx_buffer *rx_buf)
 {
        struct page *page = rx_buf->page;
 
@@ -330,9 +333,9 @@ void efx_unmap_rx_buffer(struct efx_nic *efx,
        }
 }
 
-void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
-                        struct efx_rx_buffer *rx_buf,
-                        unsigned int num_bufs)
+void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
+                              struct efx_rx_buffer *rx_buf,
+                              unsigned int num_bufs)
 {
        do {
                if (rx_buf->page) {
@@ -343,7 +346,7 @@ void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
        } while (--num_bufs);
 }
 
-void efx_rx_slow_fill(struct timer_list *t)
+void efx_siena_rx_slow_fill(struct timer_list *t)
 {
        struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
 
@@ -352,7 +355,7 @@ void efx_rx_slow_fill(struct timer_list *t)
        ++rx_queue->slow_fill_count;
 }
 
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
+static void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
 {
        mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
 }
@@ -425,7 +428,7 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
        return 0;
 }
 
-void efx_rx_config_page_split(struct efx_nic *efx)
+void efx_siena_rx_config_page_split(struct efx_nic *efx)
 {
        efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
                                      EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
@@ -439,7 +442,7 @@ void efx_rx_config_page_split(struct efx_nic *efx)
                                               efx->rx_bufs_per_page);
 }
 
-/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
+/* efx_siena_fast_push_rx_descriptors - push new RX descriptors quickly
  * @rx_queue:          RX descriptor queue
  *
  * This will aim to fill the RX descriptor queue up to
@@ -450,7 +453,8 @@ void efx_rx_config_page_split(struct efx_nic *efx)
  * this means this function must run from the NAPI handler, or be called
  * when NAPI is disabled.
  */
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
+void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
+                                       bool atomic)
 {
        struct efx_nic *efx = rx_queue->efx;
        unsigned int fill_level, batch_size;
@@ -517,7 +521,7 @@ efx_siena_rx_packet_gro(struct efx_channel *channel,
                struct efx_rx_queue *rx_queue;
 
                rx_queue = efx_channel_get_rx_queue(channel);
-               efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+               efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
                return;
        }
 
@@ -556,7 +560,7 @@ efx_siena_rx_packet_gro(struct efx_channel *channel,
 /* RSS contexts.  We're using linked lists and crappy O(n) algorithms, because
  * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
  */
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
+struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx)
 {
        struct list_head *head = &efx->rss_context.list;
        struct efx_rss_context *ctx, *new;
@@ -589,7 +593,8 @@ struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
        return new;
 }
 
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
+struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
+                                                        u32 id)
 {
        struct list_head *head = &efx->rss_context.list;
        struct efx_rss_context *ctx;
@@ -602,14 +607,14 @@ struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
        return NULL;
 }
 
-void efx_free_rss_context_entry(struct efx_rss_context *ctx)
+void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx)
 {
        list_del(&ctx->list);
        kfree(ctx);
 }
 
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
-                                   struct efx_rss_context *ctx)
+void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
+                                         struct efx_rss_context *ctx)
 {
        size_t i;
 
@@ -619,7 +624,7 @@ void efx_set_default_rx_indir_table(struct efx_nic *efx,
 }
 
 /**
- * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
+ * efx_siena_filter_is_mc_recipient - test whether spec is a multicast recipient
  * @spec: Specification to test
  *
  * Return: %true if the specification is a non-drop RX filter that
@@ -627,7 +632,7 @@ void efx_set_default_rx_indir_table(struct efx_nic *efx,
  * IPv4 or IPv6 address value in the respective multicast address
  * range.  Otherwise %false.
  */
-bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
+bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec)
 {
        if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
            spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
@@ -652,8 +657,8 @@ bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
        return false;
 }
 
-bool efx_filter_spec_equal(const struct efx_filter_spec *left,
-                          const struct efx_filter_spec *right)
+bool efx_siena_filter_spec_equal(const struct efx_filter_spec *left,
+                                const struct efx_filter_spec *right)
 {
        if ((left->match_flags ^ right->match_flags) |
            ((left->flags ^ right->flags) &
@@ -665,7 +670,7 @@ bool efx_filter_spec_equal(const struct efx_filter_spec *left,
                      offsetof(struct efx_filter_spec, outer_vid)) == 0;
 }
 
-u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
+u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec)
 {
        BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
        return jhash2((const u32 *)&spec->outer_vid,
@@ -675,8 +680,8 @@ u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
 }
 
 #ifdef CONFIG_RFS_ACCEL
-bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
-                       bool *force)
+bool efx_siena_rps_check_rule(struct efx_arfs_rule *rule,
+                             unsigned int filter_idx, bool *force)
 {
        if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
                /* ARFS is currently updating this entry, leave it */
@@ -692,7 +697,7 @@ bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
        } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
                /* ARFS has moved on, so old filter is not needed.  Since we did
                 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
-                * not be removed by efx_rps_hash_del() subsequently.
+                * not be removed by efx_siena_rps_hash_del() subsequently.
                 */
                *force = true;
                return true;
@@ -705,7 +710,7 @@ static
 struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
                                       const struct efx_filter_spec *spec)
 {
-       u32 hash = efx_filter_spec_hash(spec);
+       u32 hash = efx_siena_filter_spec_hash(spec);
 
        lockdep_assert_held(&efx->rps_hash_lock);
        if (!efx->rps_hash_table)
@@ -713,7 +718,7 @@ struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
        return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
 }
 
-struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+struct efx_arfs_rule *efx_siena_rps_hash_find(struct efx_nic *efx,
                                        const struct efx_filter_spec *spec)
 {
        struct efx_arfs_rule *rule;
@@ -725,15 +730,15 @@ struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
                return NULL;
        hlist_for_each(node, head) {
                rule = container_of(node, struct efx_arfs_rule, node);
-               if (efx_filter_spec_equal(spec, &rule->spec))
+               if (efx_siena_filter_spec_equal(spec, &rule->spec))
                        return rule;
        }
        return NULL;
 }
 
-struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
-                                      const struct efx_filter_spec *spec,
-                                      bool *new)
+static struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+                                       const struct efx_filter_spec *spec,
+                                       bool *new)
 {
        struct efx_arfs_rule *rule;
        struct hlist_head *head;
@@ -744,7 +749,7 @@ struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
                return NULL;
        hlist_for_each(node, head) {
                rule = container_of(node, struct efx_arfs_rule, node);
-               if (efx_filter_spec_equal(spec, &rule->spec)) {
+               if (efx_siena_filter_spec_equal(spec, &rule->spec)) {
                        *new = false;
                        return rule;
                }
@@ -758,7 +763,8 @@ struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
        return rule;
 }
 
-void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
+void efx_siena_rps_hash_del(struct efx_nic *efx,
+                           const struct efx_filter_spec *spec)
 {
        struct efx_arfs_rule *rule;
        struct hlist_head *head;
@@ -769,7 +775,7 @@ void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
                return;
        hlist_for_each(node, head) {
                rule = container_of(node, struct efx_arfs_rule, node);
-               if (efx_filter_spec_equal(spec, &rule->spec)) {
+               if (efx_siena_filter_spec_equal(spec, &rule->spec)) {
                        /* Someone already reused the entry.  We know that if
                         * this check doesn't fire (i.e. filter_id == REMOVING)
                         * then the REMOVING mark was put there by our caller,
@@ -788,7 +794,7 @@ void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
 }
 #endif
 
-int efx_probe_filters(struct efx_nic *efx)
+int efx_siena_probe_filters(struct efx_nic *efx)
 {
        int rc;
 
@@ -835,7 +841,7 @@ out_unlock:
        return rc;
 }
 
-void efx_remove_filters(struct efx_nic *efx)
+void efx_siena_remove_filters(struct efx_nic *efx)
 {
 #ifdef CONFIG_RFS_ACCEL
        struct efx_channel *channel;
@@ -870,7 +876,7 @@ static void efx_filter_rfs_work(struct work_struct *data)
                rc %= efx->type->max_rx_ip_filters;
        if (efx->rps_hash_table) {
                spin_lock_bh(&efx->rps_hash_lock);
-               rule = efx_rps_hash_find(efx, &req->spec);
+               rule = efx_siena_rps_hash_find(efx, &req->spec);
                /* The rule might have already gone, if someone else's request
                 * for the same spec was already worked and then expired before
                 * we got around to our work.  In that case we have nothing
@@ -930,8 +936,9 @@ static void efx_filter_rfs_work(struct work_struct *data)
                /* We're overloading the NIC's filter tables, so let's do a
                 * chunk of extra expiry work.
                 */
-               __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
-                                                    100u));
+               __efx_siena_filter_rfs_expire(channel,
+                                             min(channel->rfs_filter_count,
+                                                 100u));
        }
 
        /* Release references */
@@ -939,8 +946,8 @@ static void efx_filter_rfs_work(struct work_struct *data)
        dev_put(req->net_dev);
 }
 
-int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
-                  u16 rxq_index, u32 flow_id)
+int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+                        u16 rxq_index, u32 flow_id)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_async_filter_insertion *req;
@@ -1041,7 +1048,8 @@ out_clear:
        return rc;
 }
 
-bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
+bool __efx_siena_filter_rfs_expire(struct efx_channel *channel,
+                                  unsigned int quota)
 {
        bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
        struct efx_nic *efx = channel->efx;
drivers/net/ethernet/sfc/siena/rx_common.h
index 909d06a4fdc9fff88f5a4374e27aac075f589eba..6b37f83ecb30b8f4e951ff81bacd9c691bb4b066 100644
@@ -43,26 +43,19 @@ static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
 #endif
 }
 
-void efx_rx_slow_fill(struct timer_list *t);
-
-void efx_recycle_rx_pages(struct efx_channel *channel,
-                         struct efx_rx_buffer *rx_buf,
-                         unsigned int n_frags);
-void efx_discard_rx_packet(struct efx_channel *channel,
-                          struct efx_rx_buffer *rx_buf,
-                          unsigned int n_frags);
-
-int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_destroy_rx_queue(struct efx_rx_queue *rx_queue);
-
-void efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
-                       struct page *page,
-                       unsigned int page_offset,
-                       u16 flags);
-void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf);
+void efx_siena_rx_slow_fill(struct timer_list *t);
+
+void efx_siena_recycle_rx_pages(struct efx_channel *channel,
+                               struct efx_rx_buffer *rx_buf,
+                               unsigned int n_frags);
+void efx_siena_discard_rx_packet(struct efx_channel *channel,
+                                struct efx_rx_buffer *rx_buf,
+                                unsigned int n_frags);
+
+int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue);
 
 static inline void efx_sync_rx_buffer(struct efx_nic *efx,
                                      struct efx_rx_buffer *rx_buf,
@@ -72,46 +65,46 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
                                DMA_FROM_DEVICE);
 }
 
-void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
-                        struct efx_rx_buffer *rx_buf,
-                        unsigned int num_bufs);
+void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
+                              struct efx_rx_buffer *rx_buf,
+                              unsigned int num_bufs);
 
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
-void efx_rx_config_page_split(struct efx_nic *efx);
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
+void efx_siena_rx_config_page_split(struct efx_nic *efx);
+void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
+                                       bool atomic);
 
 void
 efx_siena_rx_packet_gro(struct efx_channel *channel,
                        struct efx_rx_buffer *rx_buf,
                        unsigned int n_frags, u8 *eh, __wsum csum);
 
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
-void efx_free_rss_context_entry(struct efx_rss_context *ctx);
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
-                                   struct efx_rss_context *ctx);
+struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx);
+struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
+                                                        u32 id);
+void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx);
+void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
+                                         struct efx_rss_context *ctx);
 
-bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
-bool efx_filter_spec_equal(const struct efx_filter_spec *left,
-                          const struct efx_filter_spec *right);
-u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
+bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+bool efx_siena_filter_spec_equal(const struct efx_filter_spec *left,
+                                const struct efx_filter_spec *right);
+u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec);
 
 #ifdef CONFIG_RFS_ACCEL
-bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
-                       bool *force);
-struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+bool efx_siena_rps_check_rule(struct efx_arfs_rule *rule,
+                             unsigned int filter_idx, bool *force);
+struct efx_arfs_rule *efx_siena_rps_hash_find(struct efx_nic *efx,
                                        const struct efx_filter_spec *spec);
-struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
-                                      const struct efx_filter_spec *spec,
-                                      bool *new);
-void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
-
-int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
-                  u16 rxq_index, u32 flow_id);
-bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota);
+void efx_siena_rps_hash_del(struct efx_nic *efx,
+                           const struct efx_filter_spec *spec);
+
+int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+                        u16 rxq_index, u32 flow_id);
+bool __efx_siena_filter_rfs_expire(struct efx_channel *channel,
+                                  unsigned int quota);
 #endif
 
-int efx_probe_filters(struct efx_nic *efx);
-void efx_remove_filters(struct efx_nic *efx);
+int efx_siena_probe_filters(struct efx_nic *efx);
+void efx_siena_remove_filters(struct efx_nic *efx);
 
 #endif
drivers/net/ethernet/sfc/siena/tx.c
index 118ec6f5f0972b6f33192ee86b9157c4a66a98bd..0677a0254d8534b9c159adbb8a47cec9b5f89dc0 100644
@@ -41,14 +41,6 @@ static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
        return (u8 *)page_buf->addr + offset;
 }
 
-u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
-                                  struct efx_tx_buffer *buffer, size_t len)
-{
-       if (len > EFX_TX_CB_SIZE)
-               return NULL;
-       return efx_tx_get_copy_buffer(tx_queue, buffer);
-}
-
 static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
 {
        /* We need to consider all queues that the net core sees as one */
@@ -164,7 +156,7 @@ netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
         * size limit.
         */
        if (segments) {
-               rc = efx_tx_tso_fallback(tx_queue, skb);
+               rc = efx_siena_tx_tso_fallback(tx_queue, skb);
                tx_queue->tso_fallbacks++;
                if (rc == 0)
                        return 0;
@@ -178,7 +170,7 @@ netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
        }
 
        /* Map for DMA and create descriptors if we haven't done so already. */
-       if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
+       if (!data_mapped && (efx_siena_tx_map_data(tx_queue, skb, segments)))
                goto err;
 
        efx_tx_maybe_stop_queue(tx_queue);
@@ -201,7 +193,7 @@ netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
 
 
 err:
-       efx_enqueue_unwind(tx_queue, old_insert_count);
+       efx_siena_enqueue_unwind(tx_queue, old_insert_count);
        dev_kfree_skb_any(skb);
 
        /* If we're not expecting another transmit and we had something to push
@@ -285,7 +277,7 @@ int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpf
                        break;
 
                /*  Create descriptor and set up for unmapping DMA. */
-               tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+               tx_buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len);
                tx_buffer->xdpf = xdpf;
                tx_buffer->flags = EFX_TX_BUF_XDP |
                                   EFX_TX_BUF_MAP_SINGLE;
drivers/net/ethernet/sfc/siena/tx.h
index f2c4d2f89919d2530bdf1f2a40b37a442d69bac1..ee801950c9098a89ef199d29701c6a6c18d800ba 100644
 #include <linux/types.h>
 
 /* Driver internal tx-path related declarations. */
-
-unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
-                             dma_addr_t dma_addr, unsigned int len);
-
-u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
-                                  struct efx_tx_buffer *buffer, size_t len);
-
 /* What TXQ type will satisfy the checksum offloads required for this skb? */
 static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb)
 {
drivers/net/ethernet/sfc/siena/tx_common.c
index 7945fe681e297c1854653b404c20751a0b8e7b85..66adc8525a3a529d195b483d597ae61397247013 100644
@@ -19,7 +19,7 @@ static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
                            PAGE_SIZE >> EFX_TX_CB_ORDER);
 }
 
-int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
+int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue)
 {
        struct efx_nic *efx = tx_queue->efx;
        unsigned int entries;
@@ -64,7 +64,7 @@ fail1:
        return rc;
 }
 
-void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
+void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue)
 {
        struct efx_nic *efx = tx_queue->efx;
 
@@ -94,32 +94,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
        tx_queue->initialised = true;
 }
 
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
-{
-       struct efx_tx_buffer *buffer;
-
-       netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
-                 "shutting down TX queue %d\n", tx_queue->queue);
-
-       tx_queue->initialised = false;
-
-       if (!tx_queue->buffer)
-               return;
-
-       /* Free any buffers left in the ring */
-       while (tx_queue->read_count != tx_queue->write_count) {
-               unsigned int pkts_compl = 0, bytes_compl = 0;
-
-               buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
-               efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-
-               ++tx_queue->read_count;
-       }
-       tx_queue->xmit_pending = false;
-       netdev_tx_reset_queue(tx_queue->core_txq);
-}
-
-void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
+void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
        int i;
 
@@ -143,10 +118,10 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
        tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
 }
 
-void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
-                       struct efx_tx_buffer *buffer,
-                       unsigned int *pkts_compl,
-                       unsigned int *bytes_compl)
+static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+                              struct efx_tx_buffer *buffer,
+                              unsigned int *pkts_compl,
+                              unsigned int *bytes_compl)
 {
        if (buffer->unmap_len) {
                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
@@ -191,6 +166,29 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
        buffer->flags = 0;
 }
 
+void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue)
+{
+       struct efx_tx_buffer *buffer;
+
+       netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+                 "shutting down TX queue %d\n", tx_queue->queue);
+
+       if (!tx_queue->buffer)
+               return;
+
+       /* Free any buffers left in the ring */
+       while (tx_queue->read_count != tx_queue->write_count) {
+               unsigned int pkts_compl = 0, bytes_compl = 0;
+
+               buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
+               efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+               ++tx_queue->read_count;
+       }
+       tx_queue->xmit_pending = false;
+       netdev_tx_reset_queue(tx_queue->core_txq);
+}
+
 /* Remove packets from the TX queue
  *
  * This removes packets from the TX queue, up to and including the
@@ -271,8 +269,8 @@ void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 /* Remove buffers put into a tx_queue for the current packet.
  * None of the buffers must have an skb attached.
  */
-void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
-                       unsigned int insert_count)
+void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
+                             unsigned int insert_count)
 {
        struct efx_tx_buffer *buffer;
        unsigned int bytes_compl = 0;
@@ -286,8 +284,8 @@ void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
        }
 }
 
-struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
-                                      dma_addr_t dma_addr, size_t len)
+struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
+                                            dma_addr_t dma_addr, size_t len)
 {
        const struct efx_nic_type *nic_type = tx_queue->efx->type;
        struct efx_tx_buffer *buffer;
@@ -313,7 +311,7 @@ struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
        return buffer;
 }
 
-int efx_tx_tso_header_length(struct sk_buff *skb)
+static int efx_tx_tso_header_length(struct sk_buff *skb)
 {
        size_t header_len;
 
@@ -328,8 +326,8 @@ int efx_tx_tso_header_length(struct sk_buff *skb)
 }
 
 /* Map all data from an SKB for DMA and create descriptors on the queue. */
-int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
-                   unsigned int segment_count)
+int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+                         unsigned int segment_count)
 {
        struct efx_nic *efx = tx_queue->efx;
        struct device *dma_dev = &efx->pci_dev->dev;
@@ -359,7 +357,7 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
 
                if (header_len != len) {
                        tx_queue->tso_long_headers++;
-                       efx_tx_map_chunk(tx_queue, dma_addr, header_len);
+                       efx_siena_tx_map_chunk(tx_queue, dma_addr, header_len);
                        len -= header_len;
                        dma_addr += header_len;
                }
@@ -370,7 +368,7 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
                struct efx_tx_buffer *buffer;
                skb_frag_t *fragment;
 
-               buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+               buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len);
 
                /* The final descriptor for a fragment is responsible for
                 * unmapping the whole fragment.
@@ -402,7 +400,7 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
        } while (1);
 }
 
-unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
+unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx)
 {
        /* Header and payload descriptor for each output segment, plus
         * one for every input fragment boundary within a segment
@@ -430,7 +428,8 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
  *
  * Returns 0 on success, error code otherwise.
  */
-int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue,
+                             struct sk_buff *skb)
 {
        struct sk_buff *segments, *next;
 
drivers/net/ethernet/sfc/siena/tx_common.h
index 602f5a052918ba93efeb9d4afcb45aaf4af9534e..31ca52a25015176ad21124ba26935fc7dc5491d3 100644
 #ifndef EFX_TX_COMMON_H
 #define EFX_TX_COMMON_H
 
-int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-
-void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
-                       struct efx_tx_buffer *buffer,
-                       unsigned int *pkts_compl,
-                       unsigned int *bytes_compl);
+int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue);
 
 static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
 {
@@ -29,17 +24,16 @@ static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
 void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
 void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 
-void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
-                       unsigned int insert_count);
+void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
+                             unsigned int insert_count);
 
-struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
-                                      dma_addr_t dma_addr, size_t len);
-int efx_tx_tso_header_length(struct sk_buff *skb);
-int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
-                   unsigned int segment_count);
+struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
+                                            dma_addr_t dma_addr, size_t len);
+int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+                         unsigned int segment_count);
 
-unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
-int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx);
+int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 
-extern bool efx_separate_tx_channels;
+extern bool efx_siena_separate_tx_channels;
 #endif