git.baikalelectronics.ru Git - kernel.git/commitdiff
ch_ktls: Fix kernel panic
authorVinay Kumar Yadav <vinay.yadav@chelsio.com>
Thu, 15 Apr 2021 07:47:45 +0000 (13:17 +0530)
committerDavid S. Miller <davem@davemloft.net>
Thu, 15 Apr 2021 23:55:49 +0000 (16:55 -0700)
Taking a page refcount is not ideal and sometimes causes a kernel
panic. It is better to hold the tx_ctx lock for the complete skb
transmit, to avoid page cleanup if an ACK is received in the middle.

Fixes: 33acbcec9b50 ("cxgb4/chcr: complete record tx handling")
Signed-off-by: Vinay Kumar Yadav <vinay.yadav@chelsio.com>
Signed-off-by: Rohit Maheshwari <rohitm@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c

index 1115b8f9ea4e393180f6379b108917030e70dce2..e39fa09403678e06b664fbf90883e07a546e8bd7 100644 (file)
@@ -2010,12 +2010,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
         * we will send the complete record again.
         */
 
+       spin_lock_irqsave(&tx_ctx->base.lock, flags);
+
        do {
-               int i;
 
                cxgb4_reclaim_completed_tx(adap, &q->q, true);
-               /* lock taken */
-               spin_lock_irqsave(&tx_ctx->base.lock, flags);
                /* fetch the tls record */
                record = tls_get_record(&tx_ctx->base, tcp_seq,
                                        &tx_info->record_no);
@@ -2074,11 +2073,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
                                                    tls_end_offset, skb_offset,
                                                    0);
 
-                       spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
                        if (ret) {
                                /* free the refcount taken earlier */
                                if (tls_end_offset < data_len)
                                        dev_kfree_skb_any(skb);
+                               spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
                                goto out;
                        }
 
@@ -2088,16 +2087,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
                        continue;
                }
 
-               /* increase page reference count of the record, so that there
-                * won't be any chance of page free in middle if in case stack
-                * receives ACK and try to delete the record.
-                */
-               for (i = 0; i < record->num_frags; i++)
-                       __skb_frag_ref(&record->frags[i]);
-               /* lock cleared */
-               spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
-
-
                /* if a tls record is finishing in this SKB */
                if (tls_end_offset <= data_len) {
                        ret = chcr_end_part_handler(tx_info, skb, record,
@@ -2122,13 +2111,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
                        data_len = 0;
                }
 
-               /* clear the frag ref count which increased locally before */
-               for (i = 0; i < record->num_frags; i++) {
-                       /* clear the frag ref count */
-                       __skb_frag_unref(&record->frags[i]);
-               }
                /* if any failure, come out from the loop. */
                if (ret) {
+                       spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
                        if (th->fin)
                                dev_kfree_skb_any(skb);
 
@@ -2143,6 +2128,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 
        } while (data_len > 0);
 
+       spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
        atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
        atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);