skbuff: remove __kfree_skb_flush()
author Alexander Lobakin <alobakin@pm.me>
Sat, 13 Feb 2021 14:12:02 +0000 (14:12 +0000)
committer David S. Miller <davem@davemloft.net>
Sat, 13 Feb 2021 22:32:03 +0000 (14:32 -0800)
This function isn't much needed: the NAPI skb queue gets bulk-freed anyway
when there's no more room in the cache, and flushing it early may even reduce
the efficiency of the bulk operations.
It will be even less needed once the skb cache is reused on the allocation
path, so remove it and in this way lighten network softirqs a bit.
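
For context, a minimal sketch of the batching that makes the explicit flush
redundant, modeled loosely on the _kfree_skb_defer()/napi_alloc_cache code
touched in the diff below. The function name napi_skb_cache_put_sketch() is
hypothetical; napi_alloc_cache, skb_cache, skb_count, NAPI_SKB_CACHE_SIZE,
skbuff_head_cache and kmem_cache_free_bulk() are the existing kernel
identifiers, and the sketch assumes it lives in net/core/skbuff.c where the
per-CPU cache is defined.

/*
 * Illustrative sketch, not verbatim kernel source: once the per-CPU
 * napi_alloc_cache fills up, the cached skbuff heads are handed back to
 * the slab allocator in a single bulk call, so an explicit flush at the
 * end of each softirq run adds little.
 */
static void napi_skb_cache_put_sketch(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* stash the released skbuff head in the per-CPU cache */
	nc->skb_cache[nc->skb_count++] = skb;

	/* cache full: free the whole batch with one kmem_cache_free_bulk()
	 * call -- the same work __kfree_skb_flush() did, but at a natural
	 * batching point rather than on every softirq exit
	 */
	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}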

Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Alexander Lobakin <alobakin@pm.me>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/skbuff.h
net/core/dev.c
net/core/skbuff.c

index 0a4e91a2f873257f6af81cc0a6d9cf64b9bea517..0e07072960983c800755dfc3e21880423b1548ae 100644 (file)
@@ -2919,7 +2919,6 @@ static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
 }
 void napi_consume_skb(struct sk_buff *skb, int budget);
 
-void __kfree_skb_flush(void);
 void __kfree_skb_defer(struct sk_buff *skb);
 
 /**
index ce6291bc2e166a4f05c6a9d25266b24c650f934f..631807c196ad395a884b0d61103e73bee0060578 100644 (file)
@@ -4944,8 +4944,6 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
                        else
                                __kfree_skb_defer(skb);
                }
-
-               __kfree_skb_flush();
        }
 
        if (sd->output_queue) {
@@ -7012,7 +7010,6 @@ static int napi_threaded_poll(void *data)
                        __napi_poll(napi, &repoll);
                        netpoll_poll_unlock(have);
 
-                       __kfree_skb_flush();
                        local_bh_enable();
 
                        if (!repoll)
@@ -7042,7 +7039,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
 
                if (list_empty(&list)) {
                        if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
-                               goto out;
+                               return;
                        break;
                }
 
@@ -7069,8 +7066,6 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
                __raise_softirq_irqoff(NET_RX_SOFTIRQ);
 
        net_rps_action_and_irq_enable(sd);
-out:
-       __kfree_skb_flush();
 }
 
 struct netdev_adjacent {
index 1c6f6ef70339a153d3b3b9d861ca74db83e8bdd8..4be2bb969535e0c66968844fdfd4a46ba8e0c800 100644 (file)
@@ -838,18 +838,6 @@ void __consume_stateless_skb(struct sk_buff *skb)
        kfree_skbmem(skb);
 }
 
-void __kfree_skb_flush(void)
-{
-       struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
-
-       /* flush skb_cache if containing objects */
-       if (nc->skb_count) {
-               kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
-                                    nc->skb_cache);
-               nc->skb_count = 0;
-       }
-}
-
 static inline void _kfree_skb_defer(struct sk_buff *skb)
 {
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);