net: fix a lockdep splat
author    Eric Dumazet <eric.dumazet@gmail.com>
          Wed, 22 Sep 2010 12:43:39 +0000 (12:43 +0000)
committer David S. Miller <davem@davemloft.net>
          Sat, 25 Sep 2010 05:26:10 +0000 (22:26 -0700)
We have for each socket:

One spinlock (sk->sk_lock.slock)
One rwlock (sk->sk_callback_lock)

Possible scenarios are:

(A) (this is used in net/sunrpc/xprtsock.c)
read_lock(&sk->sk_callback_lock) (without blocking BH)
<BH>
spin_lock(&sk->sk_lock.slock);
...
read_lock(&sk->sk_callback_lock);
...

(B)
write_lock_bh(&sk->sk_callback_lock)
stuff
write_unlock_bh(&sk->sk_callback_lock)

(C)
spin_lock_bh(&sk->sk_lock.slock)
...
write_lock_bh(&sk->sk_callback_lock)
stuff
write_unlock_bh(&sk->sk_callback_lock)
spin_unlock_bh(&sk->sk_lock.slock)

This (C) case conflicts with (A):

CPU1 [A]                         CPU2 [C]
read_lock(callback_lock)
<BH>                             spin_lock_bh(slock)
<wait to spin_lock(slock)>
                                 <wait to write_lock_bh(callback_lock)>
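
An (A)-style reader in this patch is sock_i_uid() in net/core/sock.c, which before this change looked like this (taken from the diff below, comment added):

int sock_i_uid(struct sock *sk)
{
        int uid;

        read_lock(&sk->sk_callback_lock);       /* process context, BH still enabled */
        uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
        read_unlock(&sk->sk_callback_lock);
        return uid;
}

If a softirq fires on this CPU between read_lock() and read_unlock() and tries to take sk->sk_lock.slock, we are in the left-hand column of the diagram above.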

We have one problematic (C) use case in inet_csk_listen_stop():

local_bh_disable();
bh_lock_sock(child); // with local_bh_disable() above: spin_lock_bh(&sk->sk_lock.slock)
WARN_ON(sock_owned_by_user(child));
...
sock_orphan(child); // write_lock_bh(&sk->sk_callback_lock)
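
sock_orphan() is what turns this into the (C) sequence: with slock already held and BH disabled, it takes the callback lock for writing while detaching the socket. Roughly (abridged sketch; the exact body depends on the kernel version):

static inline void sock_orphan(struct sock *sk)
{
        write_lock_bh(&sk->sk_callback_lock);
        sock_set_flag(sk, SOCK_DEAD);
        sk_set_socket(sk, NULL);
        ...
        write_unlock_bh(&sk->sk_callback_lock);
}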

lockdep is not happy with this, as reported by Tetsuo Handa.

It seems the only way to deal with this is to use read_lock_bh(&sk->sk_callback_lock)
everywhere: with BH disabled for the whole read-side critical section, the softirq in
scenario (A) cannot run on that CPU, so the reader never spins on slock while holding
the callback lock and the cycle with (C) cannot form.
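
Condensed, the conversion of an affected callback looks like this (a sketch assembled from the hunks below, not a literal quote; xprt comes from xprt_from_sock(sk) in net/sunrpc/xprtsock.c):

static void xs_tcp_state_change(struct sock *sk)        /* condensed */
{
        struct rpc_xprt *xprt;

        read_lock_bh(&sk->sk_callback_lock);    /* was read_lock(): BH now disabled */
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        ...
        spin_lock(&xprt->transport_lock);       /* was spin_lock_bh(): BH already off */
        ...
        spin_unlock(&xprt->transport_lock);     /* was spin_unlock_bh() */
        ...
out:
        read_unlock_bh(&sk->sk_callback_lock);  /* was read_unlock() */
}

The same read_lock() -> read_lock_bh() conversion is applied to sock_i_uid()/sock_i_ino() in net/core/sock.c, to the RDS/TCP socket callbacks, and to the remaining sunrpc callbacks, as the hunks below show.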

Thanks to Jarek for pointing out a bug in my first attempt and for suggesting
this solution.

Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Tested-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Jarek Poplawski <jarkao2@gmail.com>
Tested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/core/sock.c
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/rds/tcp_recv.c
net/rds/tcp_send.c
net/sunrpc/xprtsock.c

diff --git a/net/core/sock.c b/net/core/sock.c
index b05b9b6ddb8700989e63e8597f6946ffd205bdba..ef30e9d286e703cccaffbd5ee10394cc167fe057 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1351,9 +1351,9 @@ int sock_i_uid(struct sock *sk)
 {
        int uid;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        return uid;
 }
 EXPORT_SYMBOL(sock_i_uid);
@@ -1362,9 +1362,9 @@ unsigned long sock_i_ino(struct sock *sk)
 {
        unsigned long ino;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        return ino;
 }
 EXPORT_SYMBOL(sock_i_ino);
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index c397524c039cdb28140ff5f6c0fe2359cebfa3b5..c519939e8da98fd3ae8252355a0ebb9efac4acd5 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -43,7 +43,7 @@ void rds_tcp_state_change(struct sock *sk)
        struct rds_connection *conn;
        struct rds_tcp_connection *tc;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (conn == NULL) {
                state_change = sk->sk_state_change;
@@ -68,7 +68,7 @@ void rds_tcp_state_change(struct sock *sk)
                        break;
        }
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        state_change(sk);
 }
 
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 975183fe6950a34b242ef55db011e6f04c1c6772..27844f231d103a4e49e542d670eaca66d01e761d 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -114,7 +114,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
 
        rdsdebug("listen data ready sk %p\n", sk);
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        ready = sk->sk_user_data;
        if (ready == NULL) { /* check for teardown race */
                ready = sk->sk_data_ready;
@@ -131,7 +131,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
                queue_work(rds_wq, &rds_tcp_listen_work);
 
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        ready(sk, bytes);
 }
 
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 1aba6878fa5dc42d4c54473350fde61d714a184c..e43797404102efcc2ed45117456e839b5425adb1 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -324,7 +324,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
 
        rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (conn == NULL) { /* check for teardown race */
                ready = sk->sk_data_ready;
@@ -338,7 +338,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
        if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM)
                queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
        ready(sk, bytes);
 }
 
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index a28b895ff0d10194730463b218e3ccb526cdea50..2f012a07d94d16d3aa01d4c78d0894e939dfd751 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -224,7 +224,7 @@ void rds_tcp_write_space(struct sock *sk)
        struct rds_connection *conn;
        struct rds_tcp_connection *tc;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (conn == NULL) {
                write_space = sk->sk_write_space;
@@ -244,7 +244,7 @@ void rds_tcp_write_space(struct sock *sk)
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 
        /*
         * write_space is only called when data leaves tcp's send queue if
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index b6309db5622689deaf1c76a6acb1f6a09a02602c..fe9306bf10cc7f3bba4590ddf853c2ac39eadae9 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -800,7 +800,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
        u32 _xid;
        __be32 *xp;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        dprintk("RPC:       xs_udp_data_ready...\n");
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
@@ -852,7 +852,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
  dropit:
        skb_free_datagram(sk, skb);
  out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
@@ -1229,7 +1229,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
 
        dprintk("RPC:       xs_tcp_data_ready...\n");
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        if (xprt->shutdown)
@@ -1248,7 +1248,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
                read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
        } while (read > 0);
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 /*
@@ -1301,7 +1301,7 @@ static void xs_tcp_state_change(struct sock *sk)
 {
        struct rpc_xprt *xprt;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        dprintk("RPC:       xs_tcp_state_change client %p...\n", xprt);
@@ -1313,7 +1313,7 @@ static void xs_tcp_state_change(struct sock *sk)
 
        switch (sk->sk_state) {
        case TCP_ESTABLISHED:
-               spin_lock_bh(&xprt->transport_lock);
+               spin_lock(&xprt->transport_lock);
                if (!xprt_test_and_set_connected(xprt)) {
                        struct sock_xprt *transport = container_of(xprt,
                                        struct sock_xprt, xprt);
@@ -1327,7 +1327,7 @@ static void xs_tcp_state_change(struct sock *sk)
 
                        xprt_wake_pending_tasks(xprt, -EAGAIN);
                }
-               spin_unlock_bh(&xprt->transport_lock);
+               spin_unlock(&xprt->transport_lock);
                break;
        case TCP_FIN_WAIT1:
                /* The client initiated a shutdown of the socket */
@@ -1365,7 +1365,7 @@ static void xs_tcp_state_change(struct sock *sk)
                xs_sock_mark_closed(xprt);
        }
  out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 /**
@@ -1376,7 +1376,7 @@ static void xs_error_report(struct sock *sk)
 {
        struct rpc_xprt *xprt;
 
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
        dprintk("RPC:       %s client %p...\n"
@@ -1384,7 +1384,7 @@ static void xs_error_report(struct sock *sk)
                        __func__, xprt, sk->sk_err);
        xprt_wake_pending_tasks(xprt, -EAGAIN);
 out:
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static void xs_write_space(struct sock *sk)
@@ -1416,13 +1416,13 @@ static void xs_write_space(struct sock *sk)
  */
 static void xs_udp_write_space(struct sock *sk)
 {
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
 
        /* from net/core/sock.c:sock_def_write_space */
        if (sock_writeable(sk))
                xs_write_space(sk);
 
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 /**
@@ -1437,13 +1437,13 @@ static void xs_udp_write_space(struct sock *sk)
  */
 static void xs_tcp_write_space(struct sock *sk)
 {
-       read_lock(&sk->sk_callback_lock);
+       read_lock_bh(&sk->sk_callback_lock);
 
        /* from net/core/stream.c:sk_stream_write_space */
        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
                xs_write_space(sk);
 
-       read_unlock(&sk->sk_callback_lock);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)