git.baikalelectronics.ru Git - kernel.git/commitdiff
tcp: take care of mixed splice()/sendmsg(MSG_ZEROCOPY) case
authorEric Dumazet <edumazet@google.com>
Thu, 3 Feb 2022 22:55:47 +0000 (14:55 -0800)
committerJakub Kicinski <kuba@kernel.org>
Sat, 5 Feb 2022 04:07:12 +0000 (20:07 -0800)
syzbot found that mixing sendpage() and sendmsg(MSG_ZEROCOPY)
calls over the same TCP socket would again trigger the
infamous warning in inet_sock_destruct()

WARN_ON(sk_forward_alloc_get(sk));

While Talal took into account a mix of regular copied data
and MSG_ZEROCOPY data in the same skb, the sendpage() path
has been forgotten.

We want the charging to happen for sendpage(), because
pages could be coming from a pipe. What is missing is the
downgrading of pure zerocopy status to make sure
sk_forward_alloc will stay synced.

Add tcp_downgrade_zcopy_pure() helper so that we can
use it from the two callers.

Fixes: 402623a4cc78 ("net: avoid double accounting for pure zerocopy skbs")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: syzbot <syzkaller@googlegroups.com>
Cc: Talal Ahmad <talalahmad@google.com>
Cc: Arjun Roy <arjunroy@google.com>
Cc: Willem de Bruijn <willemb@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Link: https://lore.kernel.org/r/20220203225547.665114-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/ipv4/tcp.c

index bdf108f544a45a2aa24bc962fb81dfd0ca1e0682..02cb275e5487d98b3e124ee102163aac47b2ad6d 100644 (file)
@@ -937,6 +937,22 @@ void tcp_remove_empty_skb(struct sock *sk)
        }
 }
 
+/* skb changing from pure zc to mixed, must charge zc */
+static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
+{
+       if (unlikely(skb_zcopy_pure(skb))) {
+               u32 extra = skb->truesize -
+                           SKB_TRUESIZE(skb_end_offset(skb));
+
+               if (!sk_wmem_schedule(sk, extra))
+                       return -ENOMEM;
+
+               sk_mem_charge(sk, extra);
+               skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
+       }
+       return 0;
+}
+
 static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
                                      struct page *page, int offset, size_t *size)
 {
@@ -972,7 +988,7 @@ new_segment:
                tcp_mark_push(tp, skb);
                goto new_segment;
        }
-       if (!sk_wmem_schedule(sk, copy))
+       if (tcp_downgrade_zcopy_pure(sk, skb) || !sk_wmem_schedule(sk, copy))
                return NULL;
 
        if (can_coalesce) {
@@ -1320,19 +1336,8 @@ new_segment:
 
                        copy = min_t(int, copy, pfrag->size - pfrag->offset);
 
-                       /* skb changing from pure zc to mixed, must charge zc */
-                       if (unlikely(skb_zcopy_pure(skb))) {
-                               u32 extra = skb->truesize -
-                                           SKB_TRUESIZE(skb_end_offset(skb));
-
-                               if (!sk_wmem_schedule(sk, extra))
-                                       goto wait_for_space;
-
-                               sk_mem_charge(sk, extra);
-                               skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
-                       }
-
-                       if (!sk_wmem_schedule(sk, copy))
+                       if (tcp_downgrade_zcopy_pure(sk, skb) ||
+                           !sk_wmem_schedule(sk, copy))
                                goto wait_for_space;
 
                        err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,