For Siena, use efx_siena_ as the function prefix.
Several functions are not used in Siena, so they are removed.
Use a Siena-specific variable name for the module parameter
efx_separate_tx_channels.
Move efx_fini_tx_queue() to avoid a forward declaration of
efx_dequeue_buffer().
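
As an illustration of the parameter rename (a sketch mirroring the hunk
below; the sysfs path mentioned in the comment is an assumption based on
standard module_param behaviour, not part of this patch),
module_param_named() lets the backing variable take a Siena-specific name
while the user-visible parameter name stays efx_separate_tx_channels:

    /* The internal variable is renamed, but module_param_named() keeps
     * the existing user-visible name, so the parameter should still show
     * up as /sys/module/<module>/parameters/efx_separate_tx_channels
     * (0444, i.e. read-only) and existing modprobe options keep working.
     */
    bool efx_siena_separate_tx_channels;
    module_param_named(efx_separate_tx_channels, efx_siena_separate_tx_channels,
                       bool, 0444);
    MODULE_PARM_DESC(efx_separate_tx_channels,
                     "Use separate channels for TX and RX");
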
Signed-off-by: Martin Habets <habetsm.xilinx@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
*
* This is only used in MSI-X interrupt mode
*/
-bool efx_separate_tx_channels;
-module_param(efx_separate_tx_channels, bool, 0444);
+bool efx_siena_separate_tx_channels;
+module_param_named(efx_separate_tx_channels, efx_siena_separate_tx_channels,
+ bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels,
"Use separate channels for TX and RX");
if (efx->n_channels > 1)
netdev_rss_key_fill(efx->rss_context.rx_hash_key,
sizeof(efx->rss_context.rx_hash_key));
- efx_set_default_rx_indir_table(efx, &efx->rss_context);
+ efx_siena_set_default_rx_indir_table(efx, &efx->rss_context);
/* Initialise the interrupt moderation settings */
efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
" VFs may not function\n", rc);
#endif
- rc = efx_probe_filters(efx);
+ rc = efx_siena_probe_filters(efx);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to create filter tables\n");
return 0;
fail5:
- efx_remove_filters(efx);
+ efx_siena_remove_filters(efx);
fail4:
#ifdef CONFIG_SFC_SRIOV
efx->type->vswitching_remove(efx);
rtnl_unlock();
efx_siena_remove_channels(efx);
- efx_remove_filters(efx);
+ efx_siena_remove_filters(efx);
#ifdef CONFIG_SFC_SRIOV
efx->type->vswitching_remove(efx);
#endif
.ndo_get_phys_port_name = efx_siena_get_phys_port_name,
.ndo_setup_tc = efx_siena_setup_tc,
#ifdef CONFIG_RFS_ACCEL
- .ndo_rx_flow_steer = efx_filter_rfs,
+ .ndo_rx_flow_steer = efx_siena_filter_rfs,
#endif
.ndo_xdp_xmit = efx_xdp_xmit,
.ndo_bpf = efx_xdp
* TSO skbs.
*/
#define EFX_RXQ_MIN_ENT 128U
-#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
+#define EFX_TXQ_MIN_ENT(efx) (2 * efx_siena_tx_max_skb_descs(efx))
/* All EF10 architecture NICs steal one bit of the DMAQ size for various
* other purposes when counting TxQ entries, so we halve the queue size.
*
* 2. If the existing filters have higher priority, return -%EPERM.
*
- * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
+ * 3. If !efx_siena_filter_is_mc_recipient(@spec), or the NIC does not
* support delivery to multiple recipients, return -%EEXIST.
*
* This implies that filters for multiple multicast recipients must
int n_xdp_tx;
int n_xdp_ev;
- if (efx_separate_tx_channels)
+ if (efx_siena_separate_tx_channels)
n_channels *= 2;
n_channels += extra_channels;
/* Ignore XDP tx channels when creating rx channels. */
n_channels -= efx->n_xdp_channels;
- if (efx_separate_tx_channels) {
+ if (efx_siena_separate_tx_channels) {
efx->n_tx_channels =
min(max(n_channels / 2, 1U),
efx->max_tx_channels);
/* Assume legacy interrupts */
if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
- efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
+ efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
efx->n_xdp_channels = 0;
channel = container_of(dwork, struct efx_channel, filter_work);
time = jiffies - channel->rfs_last_expiry;
quota = channel->rfs_filter_count * time / (30 * HZ);
- if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
+ if (quota >= 20 && __efx_siena_filter_rfs_expire(channel,
+ min(channel->rfs_filter_count, quota)))
channel->rfs_last_expiry += time;
/* Ensure we do more work eventually even if NAPI poll is not happening */
schedule_delayed_work(dwork, 30 * HZ);
rx_queue = &channel->rx_queue;
rx_queue->efx = efx;
- timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
+ timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);
return channel;
}
rx_queue = &channel->rx_queue;
rx_queue->buffer = NULL;
memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
- timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
+ timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif
goto fail;
efx_for_each_channel_tx_queue(tx_queue, channel) {
- rc = efx_probe_tx_queue(tx_queue);
+ rc = efx_siena_probe_tx_queue(tx_queue);
if (rc)
goto fail;
}
efx_for_each_channel_rx_queue(rx_queue, channel) {
- rc = efx_probe_rx_queue(rx_queue);
+ rc = efx_siena_probe_rx_queue(rx_queue);
if (rc)
goto fail;
}
"destroy chan %d\n", channel->channel);
efx_for_each_channel_rx_queue(rx_queue, channel)
- efx_remove_rx_queue(rx_queue);
+ efx_siena_remove_rx_queue(rx_queue);
efx_for_each_channel_tx_queue(tx_queue, channel)
- efx_remove_tx_queue(tx_queue);
+ efx_siena_remove_tx_queue(tx_queue);
efx_remove_eventq(channel);
channel->type->post_remove(channel);
}
int rc;
efx->tx_channel_offset =
- efx_separate_tx_channels ?
+ efx_siena_separate_tx_channels ?
efx->n_channels - efx->n_tx_channels : 0;
if (efx->xdp_tx_queue_count) {
efx_for_each_channel_rev(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
- efx_init_tx_queue(tx_queue);
+ efx_siena_init_tx_queue(tx_queue);
atomic_inc(&efx->active_queues);
}
efx_for_each_channel_rx_queue(rx_queue, channel) {
- efx_init_rx_queue(rx_queue);
+ efx_siena_init_rx_queue(rx_queue);
atomic_inc(&efx->active_queues);
efx_siena_stop_eventq(channel);
- efx_fast_push_rx_descriptors(rx_queue, false);
+ efx_siena_fast_push_rx_descriptors(rx_queue, false);
efx_siena_start_eventq(channel);
}
efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel)
- efx_fini_rx_queue(rx_queue);
+ efx_siena_fini_rx_queue(rx_queue);
efx_for_each_channel_tx_queue(tx_queue, channel)
- efx_fini_tx_queue(tx_queue);
+ efx_siena_fini_tx_queue(tx_queue);
}
}
efx_channel_get_rx_queue(channel);
efx_rx_flush_packet(channel);
- efx_fast_push_rx_descriptors(rx_queue, true);
+ efx_siena_fast_push_rx_descriptors(rx_queue, true);
}
/* Update BQL */
efx->rx_buffer_order = get_order(rx_buf_len);
}
- efx_rx_config_page_split(efx);
+ efx_siena_rx_config_page_split(efx);
if (efx->rx_buffer_order)
netif_dbg(efx, drv, efx->net_dev,
"RX buf len=%u; page order=%u batch=%u\n",
* the ring completely. We wake it when half way back to
* empty.
*/
- efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
+ efx->txq_stop_thresh = efx->txq_entries - efx_siena_tx_max_skb_descs(efx);
efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
/* Initialise the channels */
mutex_lock(&efx->rss_lock);
if (info->flow_type & FLOW_RSS && info->rss_context) {
- ctx = efx_find_rss_context_entry(efx, info->rss_context);
+ ctx = efx_siena_find_rss_context_entry(efx,
+ info->rss_context);
if (!ctx) {
rc = -ENOENT;
goto out_unlock;
return -EOPNOTSUPP;
mutex_lock(&efx->rss_lock);
- ctx = efx_find_rss_context_entry(efx, rss_context);
+ ctx = efx_siena_find_rss_context_entry(efx, rss_context);
if (!ctx) {
rc = -ENOENT;
goto out_unlock;
rc = -EINVAL;
goto out_unlock;
}
- ctx = efx_alloc_rss_context_entry(efx);
+ ctx = efx_siena_alloc_rss_context_entry(efx);
if (!ctx) {
rc = -ENOMEM;
goto out_unlock;
}
ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* Initialise indir table and key to defaults */
- efx_set_default_rx_indir_table(efx, ctx);
+ efx_siena_set_default_rx_indir_table(efx, ctx);
netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
allocated = true;
} else {
- ctx = efx_find_rss_context_entry(efx, *rss_context);
+ ctx = efx_siena_find_rss_context_entry(efx, *rss_context);
if (!ctx) {
rc = -ENOENT;
goto out_unlock;
/* delete this context */
rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
if (!rc)
- efx_free_rss_context_entry(ctx);
+ efx_siena_free_rss_context_entry(ctx);
goto out_unlock;
}
rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key);
if (rc && allocated)
- efx_free_rss_context_entry(ctx);
+ efx_siena_free_rss_context_entry(ctx);
else
*rss_context = ctx->user_id;
out_unlock:
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
* queue. Refill it here */
- efx_fast_push_rx_descriptors(rx_queue, true);
+ efx_siena_fast_push_rx_descriptors(rx_queue, true);
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
efx_farch_handle_drain_event(channel);
} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
*/
arfs_id = 0;
} else {
- rule = efx_rps_hash_find(efx, &spec);
+ rule = efx_siena_rps_hash_find(efx, &spec);
if (!rule) {
/* ARFS table doesn't know of this filter, remove it */
force = true;
} else {
arfs_id = rule->arfs_id;
- if (!efx_rps_check_rule(rule, index, &force))
+ if (!efx_siena_rps_check_rule(rule, index,
+ &force))
goto out_unlock;
}
}
flow_id, arfs_id)) {
if (rule)
rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
- efx_rps_hash_del(efx, &spec);
+ efx_siena_rps_hash_del(efx, &spec);
efx_farch_filter_table_clear_entry(efx, table, index);
ret = true;
}
*/
if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
efx_rx_flush_packet(channel);
- efx_discard_rx_packet(channel, rx_buf, n_frags);
+ efx_siena_discard_rx_packet(channel, rx_buf, n_frags);
return;
}
/* All fragments have been DMA-synced, so recycle pages. */
rx_buf = efx_rx_buffer(rx_queue, index);
- efx_recycle_rx_pages(channel, rx_buf, n_frags);
+ efx_siena_recycle_rx_pages(channel, rx_buf, n_frags);
/* Pipeline receives so that we give time for packet headers to be
* prefetched into cache.
struct efx_rx_queue *rx_queue;
rx_queue = efx_channel_get_rx_queue(channel);
- efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
return;
}
skb_record_rx_queue(skb, channel->rx_queue.core_index);
if (unlikely(channel->rx_pkt_n_frags > 1)) {
/* We can't do XDP on fragmented packets - drop. */
- efx_free_rx_buffers(rx_queue, rx_buf,
- channel->rx_pkt_n_frags);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf,
+ channel->rx_pkt_n_frags);
if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev,
"XDP is not possible with multiple receive fragments (%d)\n",
xdpf = xdp_convert_buff_to_frame(&xdp);
err = efx_siena_xdp_tx_buffers(efx, 1, &xdpf, true);
if (unlikely(err != 1)) {
- efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev,
"XDP TX failed (%d)\n", err);
case XDP_REDIRECT:
err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
if (unlikely(err)) {
- efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev,
"XDP redirect failed (%d)\n", err);
default:
bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act);
- efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
channel->n_rx_xdp_bad_drops++;
trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
break;
trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
fallthrough;
case XDP_DROP:
- efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
channel->n_rx_xdp_drops++;
break;
}
efx_loopback_rx_packet(efx, eh, rx_buf->len);
rx_queue = efx_channel_get_rx_queue(channel);
- efx_free_rx_buffers(rx_queue, rx_buf,
- channel->rx_pkt_n_frags);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf,
+ channel->rx_pkt_n_frags);
goto out;
}
*/
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
+static void efx_unmap_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf);
+
/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
}
/* Recycle the pages that are used by buffers that have just been received. */
-void efx_recycle_rx_pages(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf,
- unsigned int n_frags)
+void efx_siena_recycle_rx_pages(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
{
struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
} while (--n_frags);
}
-void efx_discard_rx_packet(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf,
- unsigned int n_frags)
+void efx_siena_discard_rx_packet(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
{
struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
- efx_recycle_rx_pages(channel, rx_buf, n_frags);
+ efx_siena_recycle_rx_pages(channel, rx_buf, n_frags);
- efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
}
static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
/* If this is the last buffer in a page, unmap and free it. */
if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
- efx_free_rx_buffers(rx_queue, rx_buf, 1);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
}
rx_buf->page = NULL;
}
-int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
+int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
unsigned int entries;
return rc;
}
-void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
+void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue)
{
unsigned int max_fill, trigger, max_trigger;
struct efx_nic *efx = rx_queue->efx;
efx_nic_init_rx(rx_queue);
}
-void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
+void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_rx_buffer *rx_buf;
int i;
rx_queue->xdp_rxq_info_valid = false;
}
-void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
+void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
/* Unmap a DMA-mapped page. This function is only called for the final RX
* buffer in a page.
*/
-void efx_unmap_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf)
+static void efx_unmap_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf)
{
struct page *page = rx_buf->page;
}
}
-void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf,
- unsigned int num_bufs)
+void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int num_bufs)
{
do {
if (rx_buf->page) {
} while (--num_bufs);
}
-void efx_rx_slow_fill(struct timer_list *t)
+void efx_siena_rx_slow_fill(struct timer_list *t)
{
struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
++rx_queue->slow_fill_count;
}
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
+static void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
}
return 0;
}
-void efx_rx_config_page_split(struct efx_nic *efx)
+void efx_siena_rx_config_page_split(struct efx_nic *efx)
{
efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
efx->rx_bufs_per_page);
}
-/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
+/* efx_siena_fast_push_rx_descriptors - push new RX descriptors quickly
* @rx_queue: RX descriptor queue
*
* This will aim to fill the RX descriptor queue up to
* this means this function must run from the NAPI handler, or be called
* when NAPI is disabled.
*/
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
+void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
+ bool atomic)
{
struct efx_nic *efx = rx_queue->efx;
unsigned int fill_level, batch_size;
struct efx_rx_queue *rx_queue;
rx_queue = efx_channel_get_rx_queue(channel);
- efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+ efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
return;
}
/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
* (a) this is an infrequent control-plane operation and (b) n is small (max 64)
*/
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
+struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx)
{
struct list_head *head = &efx->rss_context.list;
struct efx_rss_context *ctx, *new;
return new;
}
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
+struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
+ u32 id)
{
struct list_head *head = &efx->rss_context.list;
struct efx_rss_context *ctx;
return NULL;
}
-void efx_free_rss_context_entry(struct efx_rss_context *ctx)
+void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx)
{
list_del(&ctx->list);
kfree(ctx);
}
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
- struct efx_rss_context *ctx)
+void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
+ struct efx_rss_context *ctx)
{
size_t i;
}
/**
- * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
+ * efx_siena_filter_is_mc_recipient - test whether spec is a multicast recipient
* @spec: Specification to test
*
* Return: %true if the specification is a non-drop RX filter that
* IPv4 or IPv6 address value in the respective multicast address
* range. Otherwise %false.
*/
-bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
+bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
return false;
}
-bool efx_filter_spec_equal(const struct efx_filter_spec *left,
- const struct efx_filter_spec *right)
+bool efx_siena_filter_spec_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right)
{
if ((left->match_flags ^ right->match_flags) |
((left->flags ^ right->flags) &
offsetof(struct efx_filter_spec, outer_vid)) == 0;
}
-u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
+u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec)
{
BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
return jhash2((const u32 *)&spec->outer_vid,
}
#ifdef CONFIG_RFS_ACCEL
-bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
- bool *force)
+bool efx_siena_rps_check_rule(struct efx_arfs_rule *rule,
+ unsigned int filter_idx, bool *force)
{
if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
/* ARFS is currently updating this entry, leave it */
} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
/* ARFS has moved on, so old filter is not needed. Since we did
* not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
- * not be removed by efx_rps_hash_del() subsequently.
+ * not be removed by efx_siena_rps_hash_del() subsequently.
*/
*force = true;
return true;
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
const struct efx_filter_spec *spec)
{
- u32 hash = efx_filter_spec_hash(spec);
+ u32 hash = efx_siena_filter_spec_hash(spec);
lockdep_assert_held(&efx->rps_hash_lock);
if (!efx->rps_hash_table)
return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
}
-struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+struct efx_arfs_rule *efx_siena_rps_hash_find(struct efx_nic *efx,
const struct efx_filter_spec *spec)
{
struct efx_arfs_rule *rule;
return NULL;
hlist_for_each(node, head) {
rule = container_of(node, struct efx_arfs_rule, node);
- if (efx_filter_spec_equal(spec, &rule->spec))
+ if (efx_siena_filter_spec_equal(spec, &rule->spec))
return rule;
}
return NULL;
}
-struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
- const struct efx_filter_spec *spec,
- bool *new)
+static struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ bool *new)
{
struct efx_arfs_rule *rule;
struct hlist_head *head;
return NULL;
hlist_for_each(node, head) {
rule = container_of(node, struct efx_arfs_rule, node);
- if (efx_filter_spec_equal(spec, &rule->spec)) {
+ if (efx_siena_filter_spec_equal(spec, &rule->spec)) {
*new = false;
return rule;
}
return rule;
}
-void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
+void efx_siena_rps_hash_del(struct efx_nic *efx,
+ const struct efx_filter_spec *spec)
{
struct efx_arfs_rule *rule;
struct hlist_head *head;
return;
hlist_for_each(node, head) {
rule = container_of(node, struct efx_arfs_rule, node);
- if (efx_filter_spec_equal(spec, &rule->spec)) {
+ if (efx_siena_filter_spec_equal(spec, &rule->spec)) {
/* Someone already reused the entry. We know that if
* this check doesn't fire (i.e. filter_id == REMOVING)
* then the REMOVING mark was put there by our caller,
}
#endif
-int efx_probe_filters(struct efx_nic *efx)
+int efx_siena_probe_filters(struct efx_nic *efx)
{
int rc;
return rc;
}
-void efx_remove_filters(struct efx_nic *efx)
+void efx_siena_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
struct efx_channel *channel;
rc %= efx->type->max_rx_ip_filters;
if (efx->rps_hash_table) {
spin_lock_bh(&efx->rps_hash_lock);
- rule = efx_rps_hash_find(efx, &req->spec);
+ rule = efx_siena_rps_hash_find(efx, &req->spec);
/* The rule might have already gone, if someone else's request
* for the same spec was already worked and then expired before
* we got around to our work. In that case we have nothing
/* We're overloading the NIC's filter tables, so let's do a
* chunk of extra expiry work.
*/
- __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
- 100u));
+ __efx_siena_filter_rfs_expire(channel,
+ min(channel->rfs_filter_count,
+ 100u));
}
/* Release references */
dev_put(req->net_dev);
}
-int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
- u16 rxq_index, u32 flow_id)
+int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_async_filter_insertion *req;
return rc;
}
-bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
+bool __efx_siena_filter_rfs_expire(struct efx_channel *channel,
+ unsigned int quota)
{
bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
struct efx_nic *efx = channel->efx;
#endif
}
-void efx_rx_slow_fill(struct timer_list *t);
-
-void efx_recycle_rx_pages(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf,
- unsigned int n_frags);
-void efx_discard_rx_packet(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf,
- unsigned int n_frags);
-
-int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-void efx_destroy_rx_queue(struct efx_rx_queue *rx_queue);
-
-void efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
- struct page *page,
- unsigned int page_offset,
- u16 flags);
-void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf);
+void efx_siena_rx_slow_fill(struct timer_list *t);
+
+void efx_siena_recycle_rx_pages(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags);
+void efx_siena_discard_rx_packet(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags);
+
+int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue);
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf,
DMA_FROM_DEVICE);
}
-void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf,
- unsigned int num_bufs);
+void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int num_bufs);
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
-void efx_rx_config_page_split(struct efx_nic *efx);
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
+void efx_siena_rx_config_page_split(struct efx_nic *efx);
+void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
+ bool atomic);
void
efx_siena_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh, __wsum csum);
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
-void efx_free_rss_context_entry(struct efx_rss_context *ctx);
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
- struct efx_rss_context *ctx);
+struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx);
+struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
+ u32 id);
+void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx);
+void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
+ struct efx_rss_context *ctx);
-bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
-bool efx_filter_spec_equal(const struct efx_filter_spec *left,
- const struct efx_filter_spec *right);
-u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
+bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+bool efx_siena_filter_spec_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right);
+u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec);
#ifdef CONFIG_RFS_ACCEL
-bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
- bool *force);
-struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+bool efx_siena_rps_check_rule(struct efx_arfs_rule *rule,
+ unsigned int filter_idx, bool *force);
+struct efx_arfs_rule *efx_siena_rps_hash_find(struct efx_nic *efx,
const struct efx_filter_spec *spec);
-struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
- const struct efx_filter_spec *spec,
- bool *new);
-void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
-
-int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
- u16 rxq_index, u32 flow_id);
-bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota);
+void efx_siena_rps_hash_del(struct efx_nic *efx,
+ const struct efx_filter_spec *spec);
+
+int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+bool __efx_siena_filter_rfs_expire(struct efx_channel *channel,
+ unsigned int quota);
#endif
-int efx_probe_filters(struct efx_nic *efx);
-void efx_remove_filters(struct efx_nic *efx);
+int efx_siena_probe_filters(struct efx_nic *efx);
+void efx_siena_remove_filters(struct efx_nic *efx);
#endif
return (u8 *)page_buf->addr + offset;
}
-u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer, size_t len)
-{
- if (len > EFX_TX_CB_SIZE)
- return NULL;
- return efx_tx_get_copy_buffer(tx_queue, buffer);
-}
-
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
/* We need to consider all queues that the net core sees as one */
* size limit.
*/
if (segments) {
- rc = efx_tx_tso_fallback(tx_queue, skb);
+ rc = efx_siena_tx_tso_fallback(tx_queue, skb);
tx_queue->tso_fallbacks++;
if (rc == 0)
return 0;
}
/* Map for DMA and create descriptors if we haven't done so already. */
- if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
+ if (!data_mapped && (efx_siena_tx_map_data(tx_queue, skb, segments)))
goto err;
efx_tx_maybe_stop_queue(tx_queue);
err:
- efx_enqueue_unwind(tx_queue, old_insert_count);
+ efx_siena_enqueue_unwind(tx_queue, old_insert_count);
dev_kfree_skb_any(skb);
/* If we're not expecting another transmit and we had something to push
break;
/* Create descriptor and set up for unmapping DMA. */
- tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+ tx_buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len);
tx_buffer->xdpf = xdpf;
tx_buffer->flags = EFX_TX_BUF_XDP |
EFX_TX_BUF_MAP_SINGLE;
#include <linux/types.h>
/* Driver internal tx-path related declarations. */
-
-unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
- dma_addr_t dma_addr, unsigned int len);
-
-u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer, size_t len);
-
/* What TXQ type will satisfy the checksum offloads required for this skb? */
static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb)
{
PAGE_SIZE >> EFX_TX_CB_ORDER);
}
-int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
+int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int entries;
return rc;
}
-void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
+void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
tx_queue->initialised = true;
}
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
-{
- struct efx_tx_buffer *buffer;
-
- netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
- "shutting down TX queue %d\n", tx_queue->queue);
-
- tx_queue->initialised = false;
-
- if (!tx_queue->buffer)
- return;
-
- /* Free any buffers left in the ring */
- while (tx_queue->read_count != tx_queue->write_count) {
- unsigned int pkts_compl = 0, bytes_compl = 0;
-
- buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
- efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
-
- ++tx_queue->read_count;
- }
- tx_queue->xmit_pending = false;
- netdev_tx_reset_queue(tx_queue->core_txq);
-}
-
-void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
+void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
int i;
tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
}
-void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer,
- unsigned int *pkts_compl,
- unsigned int *bytes_compl)
+static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+ struct efx_tx_buffer *buffer,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
{
if (buffer->unmap_len) {
struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
buffer->flags = 0;
}
+void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_tx_buffer *buffer;
+
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "shutting down TX queue %d\n", tx_queue->queue);
+
+ if (!tx_queue->buffer)
+ return;
+
+ /* Free any buffers left in the ring */
+ while (tx_queue->read_count != tx_queue->write_count) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+
+ buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+ ++tx_queue->read_count;
+ }
+ tx_queue->xmit_pending = false;
+ netdev_tx_reset_queue(tx_queue->core_txq);
+}
+
/* Remove packets from the TX queue
*
* This removes packets from the TX queue, up to and including the
/* Remove buffers put into a tx_queue for the current packet.
* None of the buffers must have an skb attached.
*/
-void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
- unsigned int insert_count)
+void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
+ unsigned int insert_count)
{
struct efx_tx_buffer *buffer;
unsigned int bytes_compl = 0;
}
}
-struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
- dma_addr_t dma_addr, size_t len)
+struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, size_t len)
{
const struct efx_nic_type *nic_type = tx_queue->efx->type;
struct efx_tx_buffer *buffer;
return buffer;
}
-int efx_tx_tso_header_length(struct sk_buff *skb)
+static int efx_tx_tso_header_length(struct sk_buff *skb)
{
size_t header_len;
}
/* Map all data from an SKB for DMA and create descriptors on the queue. */
-int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
- unsigned int segment_count)
+int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ unsigned int segment_count)
{
struct efx_nic *efx = tx_queue->efx;
struct device *dma_dev = &efx->pci_dev->dev;
if (header_len != len) {
tx_queue->tso_long_headers++;
- efx_tx_map_chunk(tx_queue, dma_addr, header_len);
+ efx_siena_tx_map_chunk(tx_queue, dma_addr, header_len);
len -= header_len;
dma_addr += header_len;
}
struct efx_tx_buffer *buffer;
skb_frag_t *fragment;
- buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+ buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len);
/* The final descriptor for a fragment is responsible for
* unmapping the whole fragment.
} while (1);
}
-unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
+unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx)
{
/* Header and payload descriptor for each output segment, plus
* one for every input fragment boundary within a segment
*
* Returns 0 on success, error code otherwise.
*/
-int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue,
+ struct sk_buff *skb)
{
struct sk_buff *segments, *next;
#ifndef EFX_TX_COMMON_H
#define EFX_TX_COMMON_H
-int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
-void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-
-void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer,
- unsigned int *pkts_compl,
- unsigned int *bytes_compl);
+int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue);
static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
{
void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
- unsigned int insert_count);
+void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
+ unsigned int insert_count);
-struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
- dma_addr_t dma_addr, size_t len);
-int efx_tx_tso_header_length(struct sk_buff *skb);
-int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
- unsigned int segment_count);
+struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
+ dma_addr_t dma_addr, size_t len);
+int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ unsigned int segment_count);
-unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
-int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx);
+int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
-extern bool efx_separate_tx_channels;
+extern bool efx_siena_separate_tx_channels;
#endif