git.baikalelectronics.ru Git - kernel.git/commitdiff
net/funeth: Support UDP segmentation offload
author: Dimitris Michailidis <d.michailidis@fungible.com>
Wed, 22 Jun 2022 22:37:03 +0000 (15:37 -0700)
committer: Jakub Kicinski <kuba@kernel.org>
Fri, 24 Jun 2022 23:21:05 +0000 (16:21 -0700)
Handle skbs with SKB_GSO_UDP_L4, advertise the offload in features, and
add an ethtool counter for it. Small change to existing TSO code due to
UDP's different header length.

Signed-off-by: Dimitris Michailidis <dmichail@fungible.com>
Link: https://lore.kernel.org/r/20220622223703.59886-1-dmichail@fungible.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
drivers/net/ethernet/fungible/funeth/funeth_main.c
drivers/net/ethernet/fungible/funeth/funeth_tx.c
drivers/net/ethernet/fungible/funeth/funeth_txrx.h

index d081168c95fa2b16e48f8adac9b4e9ff1f03f088..da42dd53a87cd24d554b90cb51d445e3ba529efb 100644 (file)
@@ -78,6 +78,7 @@ static const char * const txq_stat_names[] = {
        "tx_cso",
        "tx_tso",
        "tx_encapsulated_tso",
+       "tx_uso",
        "tx_more",
        "tx_queue_stops",
        "tx_queue_restarts",
@@ -778,6 +779,7 @@ static void fun_get_ethtool_stats(struct net_device *netdev,
                ADD_STAT(txs.tx_cso);
                ADD_STAT(txs.tx_tso);
                ADD_STAT(txs.tx_encap_tso);
+               ADD_STAT(txs.tx_uso);
                ADD_STAT(txs.tx_more);
                ADD_STAT(txs.tx_nstops);
                ADD_STAT(txs.tx_nrestarts);
index 9485cf699c5d647793c76b146be74e4574720397..f247b7ad3a8892a0ae780514d641100be5c1b59c 100644 (file)
@@ -1357,7 +1357,8 @@ static const struct net_device_ops fun_netdev_ops = {
 #define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \
                         NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | \
                         NETIF_F_GSO_UDP_TUNNEL_CSUM)
-#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
+                  NETIF_F_GSO_UDP_L4)
 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_HW_CSUM | TSO_FLAGS | \
                   GSO_ENCAP_FLAGS | NETIF_F_HIGHDMA)
 
index ff6e292372535eb145e7f9bf3c73750d961b5fa6..0a4a590218ba9e4b83d81f8cfd4be5ede290c1a2 100644 (file)
@@ -130,6 +130,7 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
        struct fun_dataop_gl *gle;
        const struct tcphdr *th;
        unsigned int ngle, i;
+       unsigned int l4_hlen;
        u16 flags;
 
        if (unlikely(map_skb(skb, q->dma_dev, addrs, lens))) {
@@ -178,6 +179,7 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
                                                 FUN_ETH_UPDATE_INNER_L3_LEN;
                        }
                        th = inner_tcp_hdr(skb);
+                       l4_hlen = __tcp_hdrlen(th);
                        fun_eth_offload_init(&req->offload, flags,
                                             shinfo->gso_size,
                                             tcp_hdr_doff_flags(th), 0,
@@ -185,6 +187,24 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
                                             skb_inner_transport_offset(skb),
                                             skb_network_offset(skb), ol4_ofst);
                        FUN_QSTAT_INC(q, tx_encap_tso);
+               } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
+                       flags = FUN_ETH_INNER_LSO | FUN_ETH_INNER_UDP |
+                               FUN_ETH_UPDATE_INNER_L4_CKSUM |
+                               FUN_ETH_UPDATE_INNER_L4_LEN |
+                               FUN_ETH_UPDATE_INNER_L3_LEN;
+
+                       if (ip_hdr(skb)->version == 4)
+                               flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
+                       else
+                               flags |= FUN_ETH_INNER_IPV6;
+
+                       l4_hlen = sizeof(struct udphdr);
+                       fun_eth_offload_init(&req->offload, flags,
+                                            shinfo->gso_size,
+                                            cpu_to_be16(l4_hlen << 10), 0,
+                                            skb_network_offset(skb),
+                                            skb_transport_offset(skb), 0, 0);
+                       FUN_QSTAT_INC(q, tx_uso);
                } else {
                        /* HW considers one set of headers as inner */
                        flags = FUN_ETH_INNER_LSO |
@@ -195,6 +215,7 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
                        else
                                flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
                        th = tcp_hdr(skb);
+                       l4_hlen = __tcp_hdrlen(th);
                        fun_eth_offload_init(&req->offload, flags,
                                             shinfo->gso_size,
                                             tcp_hdr_doff_flags(th), 0,
@@ -209,7 +230,7 @@ static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
 
                extra_pkts = shinfo->gso_segs - 1;
                extra_bytes = (be16_to_cpu(req->offload.inner_l4_off) +
-                              __tcp_hdrlen(th)) * extra_pkts;
+                              l4_hlen) * extra_pkts;
        } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
                flags = FUN_ETH_UPDATE_INNER_L4_CKSUM;
                if (skb->csum_offset == offsetof(struct udphdr, check))
index 04c9f91b7489bf6f9d13fb1ff527e2f689b4d104..1711f82cad711dc6a771e7e9f21f13e62d2f115b 100644 (file)
@@ -82,6 +82,7 @@ struct funeth_txq_stats {  /* per Tx queue SW counters */
        u64 tx_cso;        /* # of packets with checksum offload */
        u64 tx_tso;        /* # of non-encapsulated TSO super-packets */
        u64 tx_encap_tso;  /* # of encapsulated TSO super-packets */
+       u64 tx_uso;        /* # of non-encapsulated UDP LSO super-packets */
        u64 tx_more;       /* # of DBs elided due to xmit_more */
        u64 tx_nstops;     /* # of times the queue has stopped */
        u64 tx_nrestarts;  /* # of times the queue has restarted */