git.baikalelectronics.ru Git - kernel.git/commitdiff
btrfs: implement a nowait option for tree searches
authorJosef Bacik <josef@toxicpanda.com>
Mon, 12 Sep 2022 19:27:42 +0000 (12:27 -0700)
committerDavid Sterba <dsterba@suse.com>
Mon, 26 Sep 2022 10:46:42 +0000 (12:46 +0200)
For NOWAIT IOCBs we'll need a way to tell search to not wait on locks
or anything.  Accomplish this by adding a path->nowait flag that will
use trylocks and skip reading of metadata, returning -EAGAIN in either
of these cases.  For now we only need this for reads, so only the read
side is handled.  Add an ASSERT() to catch anybody trying to use this
for writes so they know they'll have to implement the write side.

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Stefan Roesch <shr@fb.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/locking.c
fs/btrfs/locking.h

index ebfa35fe1c38b0fe6194ee43abc0e79c5cd89978..84548b4074f738134f1b5f27cda62d305c722295 100644 (file)
@@ -1447,6 +1447,11 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
                        return 0;
                }
 
+               if (p->nowait) {
+                       free_extent_buffer(tmp);
+                       return -EAGAIN;
+               }
+
                if (unlock_up)
                        btrfs_unlock_up_safe(p, level + 1);
 
@@ -1467,6 +1472,8 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
                        ret = -EAGAIN;
 
                goto out;
+       } else if (p->nowait) {
+               return -EAGAIN;
        }
 
        if (unlock_up) {
@@ -1634,7 +1641,13 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
                 * We don't know the level of the root node until we actually
                 * have it read locked
                 */
-               b = btrfs_read_lock_root_node(root);
+               if (p->nowait) {
+                       b = btrfs_try_read_lock_root_node(root);
+                       if (IS_ERR(b))
+                               return b;
+               } else {
+                       b = btrfs_read_lock_root_node(root);
+               }
                level = btrfs_header_level(b);
                if (level > write_lock_level)
                        goto out;
@@ -1910,6 +1923,13 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
        WARN_ON(p->nodes[0] != NULL);
        BUG_ON(!cow && ins_len);
 
+       /*
+        * For now only allow nowait for read only operations.  There's no
+        * strict reason why we can't, we just only need it for reads so it's
+        * only implemented for reads.
+        */
+       ASSERT(!p->nowait || !cow);
+
        if (ins_len < 0) {
                lowest_unlock = 2;
 
@@ -1936,7 +1956,12 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 
        if (p->need_commit_sem) {
                ASSERT(p->search_commit_root);
-               down_read(&fs_info->commit_root_sem);
+               if (p->nowait) {
+                       if (!down_read_trylock(&fs_info->commit_root_sem))
+                               return -EAGAIN;
+               } else {
+                       down_read(&fs_info->commit_root_sem);
+               }
        }
 
 again:
@@ -2082,7 +2107,15 @@ cow_done:
                                btrfs_tree_lock(b);
                                p->locks[level] = BTRFS_WRITE_LOCK;
                        } else {
-                               btrfs_tree_read_lock(b);
+                               if (p->nowait) {
+                                       if (!btrfs_try_tree_read_lock(b)) {
+                                               free_extent_buffer(b);
+                                               ret = -EAGAIN;
+                                               goto done;
+                                       }
+                               } else {
+                                       btrfs_tree_read_lock(b);
+                               }
                                p->locks[level] = BTRFS_READ_LOCK;
                        }
                        p->nodes[level] = b;
index 38c4c4fa27624fd02c7efad95be751257c25cf6f..17af7e317e8e47c17c376e77891f405f160f03b8 100644 (file)
@@ -443,6 +443,8 @@ struct btrfs_path {
         * header (ie. sizeof(struct btrfs_item) is not included).
         */
        unsigned int search_for_extension:1;
+       /* Stop search if any locks need to be taken (for read) */
+       unsigned int nowait:1;
 };
 
 struct btrfs_dev_replace {
index 9063072b399bd833423b7faeb84f523a54ef0f88..0eab3cb274a183862c2dc4771ef3ee556f0c59f4 100644 (file)
@@ -285,6 +285,31 @@ struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
        return eb;
 }
 
+/*
+ * Loop around taking references on and locking the root node of the tree in
+ * nowait mode until we end up with a lock on the root node or returning to
+ * avoid blocking.
+ *
+ * Return: root extent buffer with read lock held or -EAGAIN.
+ */
+struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root)
+{
+       struct extent_buffer *eb;
+
+       while (1) {
+               eb = btrfs_root_node(root);
+               if (!btrfs_try_tree_read_lock(eb)) {
+                       free_extent_buffer(eb);
+                       return ERR_PTR(-EAGAIN);
+               }
+               if (eb == root->node)
+                       break;
+               btrfs_tree_read_unlock(eb);
+               free_extent_buffer(eb);
+       }
+       return eb;
+}
+
 /*
  * DREW locks
  * ==========
index ab268be09bb542fe5df6aa53fd59c9f6977f68a6..490c7a79e9959871c4c5e277fec02e955e87e824 100644 (file)
@@ -94,6 +94,7 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb);
 int btrfs_try_tree_write_lock(struct extent_buffer *eb);
 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
 struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root);
+struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root);
 
 #ifdef CONFIG_BTRFS_DEBUG
 static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb)