RDMA/siw: Always consume all skbuf data in sk_data_ready() upcall.
author Bernard Metzler <bmt@zurich.ibm.com>
Tue, 20 Sep 2022 08:12:02 +0000 (10:12 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 26 Oct 2022 11:22:38 +0000 (13:22 +0200)
[ Upstream commit 667f5e8370368c369f009d3c15a47b1f13a6ef54 ]

For header and trailer/padding processing, siw did not consume new
skb data until the minimum amount needed to fill the current header or
trailer structure, including potential payload padding, was present.
Consuming no data during the upcall may cause a receive stall, since
tcp_read_sock() does not upcall again if no new data arrive.

An NFSoRDMA client got stuck at RDMA Write reception of unaligned
payload when the current skb contained only the expected 3 padding
bytes, but not the 4-byte CRC trailer. The 4 missing bytes had already
arrived in another skb, and not consuming the 3 padding bytes in the
current upcall left the Write incomplete, waiting for the CRC forever.

Fixes: b729ba0e6e4a ("rdma/siw: receive path")
Reported-by: Olga Kornievskaia <kolga@netapp.com>
Tested-by: Olga Kornievskaia <kolga@netapp.com>
Signed-off-by: Bernard Metzler <bmt@zurich.ibm.com>
Link: https://lore.kernel.org/r/20220920081202.223629-1-bmt@zurich.ibm.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/infiniband/sw/siw/siw_qp_rx.c
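
To make the fix concrete before the diff, here is a minimal userspace
sketch of the repaired trailer logic. It is not the driver code:
struct rx_stream, get_trailer() and memcpy() are invented stand-ins for
struct siw_rx_stream, siw_get_trailer() and skb_copy_bits(). It
demonstrates the behavior described above: copy whatever the current
skb offers, advance the stream counters, and return -EAGAIN only for
the bytes still missing, so every upcall makes progress.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for struct siw_rx_stream; fields mirror the driver's names. */
struct rx_stream {
	unsigned char trailer[8]; /* pad bytes followed by 4-byte CRC */
	int fpdu_part_rem;        /* trailer bytes still expected */
	int fpdu_part_rcvd;       /* trailer bytes already copied */
	int skb_new;              /* unread bytes in the current skb */
	int skb_offset;           /* read position in the current skb */
};

static int get_trailer(struct rx_stream *srx, const unsigned char *skb)
{
	/* The old code bailed out here with -EAGAIN whenever
	 * skb_new < fpdu_part_rem, consuming nothing at all. */
	int avail = srx->skb_new < srx->fpdu_part_rem ?
		    srx->skb_new : srx->fpdu_part_rem;

	memcpy(srx->trailer + srx->fpdu_part_rcvd,
	       skb + srx->skb_offset, avail);
	srx->skb_new -= avail;
	srx->skb_offset += avail;
	srx->fpdu_part_rcvd += avail;
	srx->fpdu_part_rem -= avail;

	if (srx->fpdu_part_rem)
		return -EAGAIN; /* still incomplete, but data was consumed */
	return 0; /* trailer complete; CRC verification would follow */
}

int main(void)
{
	/* The reported case: 3 pad bytes in one skb, the CRC in the next. */
	struct rx_stream srx = { .fpdu_part_rem = 7 };
	const unsigned char skb1[3] = { 0, 0, 0 };
	const unsigned char skb2[4] = { 0xde, 0xad, 0xbe, 0xef };

	srx.skb_new = sizeof(skb1);
	srx.skb_offset = 0;
	printf("upcall 1: ret %d, rem %d\n",
	       get_trailer(&srx, skb1), srx.fpdu_part_rem);

	srx.skb_new = sizeof(skb2);
	srx.skb_offset = 0;
	printf("upcall 2: ret %d, rem %d\n",
	       get_trailer(&srx, skb2), srx.fpdu_part_rem);
	return 0;
}

Run as-is, the first upcall consumes the 3 padding bytes and returns
-EAGAIN; the second consumes the CRC and returns 0, where the old
all-or-nothing check would have consumed nothing and stalled.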

index 5f94c716301fa81b9b64b8138844879e0d81aa2e..e8a1aa07f05828513192e6c2b01cab6d22e69d06 100644
@@ -961,27 +961,28 @@ out:
 static int siw_get_trailer(struct siw_qp *qp, struct siw_rx_stream *srx)
 {
        struct sk_buff *skb = srx->skb;
+       int avail = min(srx->skb_new, srx->fpdu_part_rem);
        u8 *tbuf = (u8 *)&srx->trailer.crc - srx->pad;
        __wsum crc_in, crc_own = 0;
 
        siw_dbg_qp(qp, "expected %d, available %d, pad %u\n",
                   srx->fpdu_part_rem, srx->skb_new, srx->pad);
 
-       if (srx->skb_new < srx->fpdu_part_rem)
-               return -EAGAIN;
-
-       skb_copy_bits(skb, srx->skb_offset, tbuf, srx->fpdu_part_rem);
+       skb_copy_bits(skb, srx->skb_offset, tbuf, avail);
 
-       if (srx->mpa_crc_hd && srx->pad)
-               crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
+       srx->skb_new -= avail;
+       srx->skb_offset += avail;
+       srx->skb_copied += avail;
+       srx->fpdu_part_rem -= avail;
 
-       srx->skb_new -= srx->fpdu_part_rem;
-       srx->skb_offset += srx->fpdu_part_rem;
-       srx->skb_copied += srx->fpdu_part_rem;
+       if (srx->fpdu_part_rem)
+               return -EAGAIN;
 
        if (!srx->mpa_crc_hd)
                return 0;
 
+       if (srx->pad)
+               crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
        /*
         * CRC32 is computed, transmitted and received directly in NBO,
         * so there's never a reason to convert byte order.
@@ -1083,10 +1084,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx)
         * completely received.
         */
        if (iwarp_pktinfo[opcode].hdr_len > sizeof(struct iwarp_ctrl_tagged)) {
-               bytes = iwarp_pktinfo[opcode].hdr_len - MIN_DDP_HDR;
+               int hdrlen = iwarp_pktinfo[opcode].hdr_len;
 
-               if (srx->skb_new < bytes)
-                       return -EAGAIN;
+               bytes = min_t(int, hdrlen - MIN_DDP_HDR, srx->skb_new);
 
                skb_copy_bits(skb, srx->skb_offset,
                              (char *)c_hdr + srx->fpdu_part_rcvd, bytes);
@@ -1096,6 +1096,9 @@ static int siw_get_hdr(struct siw_rx_stream *srx)
                srx->skb_new -= bytes;
                srx->skb_offset += bytes;
                srx->skb_copied += bytes;
+
+               if (srx->fpdu_part_rcvd < hdrlen)
+                       return -EAGAIN;
        }
 
        /*
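
The last two hunks apply the same idea to the variable-length iWARP
headers: copy min(bytes still needed, bytes available), account for
them, and defer completion until fpdu_part_rcvd reaches the full header
length. A compact sketch of that pattern, reusing the invented
rx_stream type from the sketch above (the real code copies into c_hdr
via skb_copy_bits() and uses min_t()):

/* Sketch only: hdr and hdrlen stand in for c_hdr and
 * iwarp_pktinfo[opcode].hdr_len in the driver. */
static int get_hdr(struct rx_stream *srx, const unsigned char *skb,
		   unsigned char *hdr, int hdrlen)
{
	int bytes = hdrlen - srx->fpdu_part_rcvd;

	if (bytes > srx->skb_new) /* min_t(int, ...) in the driver */
		bytes = srx->skb_new;

	memcpy(hdr + srx->fpdu_part_rcvd, skb + srx->skb_offset, bytes);
	srx->fpdu_part_rcvd += bytes;
	srx->skb_new -= bytes;
	srx->skb_offset += bytes;

	/* New behavior: the partial header is consumed; completion is
	 * simply deferred to the next upcall. */
	if (srx->fpdu_part_rcvd < hdrlen)
		return -EAGAIN;
	return 0;
}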