git.baikalelectronics.ru Git - kernel.git/commitdiff
net: use skb_queue_empty_lockless() in busy poll contexts
author: Eric Dumazet <edumazet@google.com>
Thu, 24 Oct 2019 05:44:51 +0000 (22:44 -0700)
committer: David S. Miller <davem@davemloft.net>
Mon, 28 Oct 2019 20:33:41 +0000 (13:33 -0700)
Busy polling usually runs without locks.
Let's use skb_queue_empty_lockless() instead of skb_queue_empty().

Also uses READ_ONCE() in __skb_try_recv_datagram() to address
a similar potential problem.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/crypto/chelsio/chtls/chtls_io.c
drivers/nvme/host/tcp.c
net/core/datagram.c
net/core/sock.c
net/ipv4/tcp.c
net/sctp/socket.c

index 0891ab829b1b6b1318353e945e6394fab29f7c1e..98bc5a4cd5e7014990f064a92777308ae98b13e4 100644 (file)
@@ -1702,7 +1702,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                return peekmsg(sk, msg, len, nonblock, flags);
 
        if (sk_can_busy_loop(sk) &&
-           skb_queue_empty(&sk->sk_receive_queue) &&
+           skb_queue_empty_lockless(&sk->sk_receive_queue) &&
            sk->sk_state == TCP_ESTABLISHED)
                sk_busy_loop(sk, nonblock);
 
index 770dbcbc999e0bff81b8643644cab20599a0c2e3..7544be84ab3582e27ded19298b45960ce3460d96 100644 (file)
@@ -2219,7 +2219,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
        struct nvme_tcp_queue *queue = hctx->driver_data;
        struct sock *sk = queue->sock->sk;
 
-       if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
+       if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
                sk_busy_loop(sk, true);
        nvme_tcp_try_recv(queue);
        return queue->nr_cqe;
index 5b685e110affab8f6d7cd3050ce88dfddb1357f5..03515e46a49ab60cdd5f643efb3459d16f6021e5 100644 (file)
@@ -278,7 +278,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
                        break;
 
                sk_busy_loop(sk, flags & MSG_DONTWAIT);
-       } while (sk->sk_receive_queue.prev != *last);
+       } while (READ_ONCE(sk->sk_receive_queue.prev) != *last);
 
        error = -EAGAIN;
 
index a515392ba84b67b2bf5400e0cfb7c3454fa87af8..b8e758bcb6ad65c93e93fb64f70a61e353a5737e 100644 (file)
@@ -3600,7 +3600,7 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
 {
        struct sock *sk = p;
 
-       return !skb_queue_empty(&sk->sk_receive_queue) ||
+       return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
               sk_busy_loop_timeout(sk, start_time);
 }
 EXPORT_SYMBOL(sk_busy_loop_end);
index ffef502f52920170478d9fd000a507659e17de15..d8876f0e9672718b4e02bc7aaaac30ecfd4903cb 100644 (file)
@@ -1964,7 +1964,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);
 
-       if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
+       if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) &&
            (sk->sk_state == TCP_ESTABLISHED))
                sk_busy_loop(sk, nonblock);
 
index cfb25391b8b0fb66b77db995933103007113aa32..ca81e06df1651f16ab332cd9fc880c21b89a5c6d 100644 (file)
@@ -8871,7 +8871,7 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
                if (sk_can_busy_loop(sk)) {
                        sk_busy_loop(sk, noblock);
 
-                       if (!skb_queue_empty(&sk->sk_receive_queue))
+                       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                                continue;
                }