watchqueue: make sure to serialize 'wqueue->defunct' properly
author    Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 19 Jul 2022 18:09:01 +0000 (11:09 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 20 Jul 2022 17:46:07 +0000 (10:46 -0700)
When the pipe is closed, we mark the associated watchqueue defunct by
calling watch_queue_clear().  However, while that is protected by the
watchqueue lock, new watchqueue entries aren't actually added under that
lock at all: they use the pipe->rd_wait.lock instead, and looking up
that pipe happens without any locking.
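
Schematically (an abridged, illustrative sketch of the code this patch
touches, not the literal source):

    /* teardown side: pipe close */
    watch_queue_clear(wqueue);
        /* sets wqueue->defunct = true under wqueue->lock only */

    /* posting side, before this patch */
    pipe = wqueue->pipe;                    /* no lock held for this load */
    spin_lock_irq(&pipe->rd_wait.lock);     /* a different lock entirely */
    if (wqueue->defunct)                    /* checked too late: the pipe
                                               may already be freed */
        goto out;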

The watchqueue code uses an RCU read-side critical section to make sure
that the wqueue entry itself hasn't disappeared, but that does not
protect the pipe_info in any way.
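
For example, the lookup in the posting paths is essentially (a minimal
illustration; the rcu_dereference() line appears verbatim in the last
hunk below):

    rcu_read_lock();
    wqueue = rcu_dereference(watch->queue); /* wqueue itself: RCU-safe */
    pipe = wqueue->pipe;                    /* but nothing keeps the pipe
                                               alive once it is closed */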

So make sure to actually hold the wqueue lock when posting watch events,
properly serializing against the pipe being torn down.
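
With the lock_wqueue()/unlock_wqueue() helpers added below, every
posting site now follows this pattern (a sketch, condensed from the
hunks in this patch):

    rcu_read_lock();
    ...
    if (lock_wqueue(wqueue)) {
        /* wqueue->defunct was seen false under wqueue->lock, so
           the pipe cannot be torn down until unlock_wqueue() */
        post_one_notification(wqueue, n);
        unlock_wqueue(wqueue);
    }
    ...
    rcu_read_unlock();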

Reported-by: Noam Rathaus <noamr@ssd-disclosure.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
kernel/watch_queue.c

index 230038d4f90818843b5ff69274fea1aa14f5d6ac..8b28fad1319bd0d6bdfb30db651b264b024ae64f 100644
@@ -34,6 +34,27 @@ MODULE_LICENSE("GPL");
 #define WATCH_QUEUE_NOTE_SIZE 128
 #define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)
 
+/*
+ * This must be called under the RCU read-lock, which makes
+ * sure that the wqueue still exists. It can then take the lock,
+ * and check that the wqueue hasn't been destroyed, which in
+ * turn makes sure that the notification pipe still exists.
+ */
+static inline bool lock_wqueue(struct watch_queue *wqueue)
+{
+       spin_lock_bh(&wqueue->lock);
+       if (unlikely(wqueue->defunct)) {
+               spin_unlock_bh(&wqueue->lock);
+               return false;
+       }
+       return true;
+}
+
+static inline void unlock_wqueue(struct watch_queue *wqueue)
+{
+       spin_unlock_bh(&wqueue->lock);
+}
+
 static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe,
                                         struct pipe_buffer *buf)
 {
@@ -69,6 +90,10 @@ static const struct pipe_buf_operations watch_queue_pipe_buf_ops = {
 
 /*
  * Post a notification to a watch queue.
+ *
+ * Must be called with the RCU lock for reading, and the
+ * watch_queue lock held, which guarantees that the pipe
+ * hasn't been released.
  */
 static bool post_one_notification(struct watch_queue *wqueue,
                                  struct watch_notification *n)
@@ -85,9 +110,6 @@ static bool post_one_notification(struct watch_queue *wqueue,
 
        spin_lock_irq(&pipe->rd_wait.lock);
 
-       if (wqueue->defunct)
-               goto out;
-
        mask = pipe->ring_size - 1;
        head = pipe->head;
        tail = pipe->tail;
@@ -203,7 +225,10 @@ void __post_watch_notification(struct watch_list *wlist,
                if (security_post_notification(watch->cred, cred, n) < 0)
                        continue;
 
-               post_one_notification(wqueue, n);
+               if (lock_wqueue(wqueue)) {
+                       post_one_notification(wqueue, n);
+                       unlock_wqueue(wqueue);
+               }
        }
 
        rcu_read_unlock();
@@ -462,11 +487,12 @@ int add_watch_to_object(struct watch *watch, struct watch_list *wlist)
                return -EAGAIN;
        }
 
-       spin_lock_bh(&wqueue->lock);
-       kref_get(&wqueue->usage);
-       kref_get(&watch->usage);
-       hlist_add_head(&watch->queue_node, &wqueue->watches);
-       spin_unlock_bh(&wqueue->lock);
+       if (lock_wqueue(wqueue)) {
+               kref_get(&wqueue->usage);
+               kref_get(&watch->usage);
+               hlist_add_head(&watch->queue_node, &wqueue->watches);
+               unlock_wqueue(wqueue);
+       }
 
        hlist_add_head(&watch->list_node, &wlist->watchers);
        return 0;
@@ -520,20 +546,15 @@ found:
 
        wqueue = rcu_dereference(watch->queue);
 
-       /* We don't need the watch list lock for the next bit as RCU is
-        * protecting *wqueue from deallocation.
-        */
-       if (wqueue) {
+       if (lock_wqueue(wqueue)) {
                post_one_notification(wqueue, &n.watch);
 
-               spin_lock_bh(&wqueue->lock);
-
                if (!hlist_unhashed(&watch->queue_node)) {
                        hlist_del_init_rcu(&watch->queue_node);
                        put_watch(watch);
                }
 
-               spin_unlock_bh(&wqueue->lock);
+               unlock_wqueue(wqueue);
        }
 
        if (wlist->release_watch) {