btrfs: turn fs_info member buffer_radix into XArray
author Gabriel Niebler <gniebler@suse.com>
Thu, 21 Apr 2022 15:45:38 +0000 (17:45 +0200)
committer David Sterba <dsterba@suse.com>
Mon, 16 May 2022 15:03:16 +0000 (17:03 +0200)
… named 'extent_buffers'. Also adjust all usages of this object to use
the XArray API, which greatly simplifies the code as it takes care of
locking and is generally easier to use and understand, providing
notionally simpler array semantics.

Also perform some light refactoring.
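
For reference, the XArray calls used throughout this change map directly onto the radix-tree operations they replace. The sketch below is purely illustrative (the standalone xarray and the demo_* helpers are not part of this patch); it shows how each call handles its own locking, which is what removes the preload/spinlock boilerplate from the insert, lookup and delete paths:

#include <linux/xarray.h>

/* Illustrative only: an xarray keyed like extent_buffers (start / sectorsize). */
static DEFINE_XARRAY(demo_buffers);

static int demo_insert(unsigned long index, void *entry)
{
	/*
	 * Replaces radix_tree_preload() + spin_lock() + radix_tree_insert():
	 * xa_insert() takes the xa_lock internally and returns -EBUSY if the
	 * slot is already occupied, or -ENOMEM on allocation failure.
	 */
	return xa_insert(&demo_buffers, index, entry, GFP_NOFS);
}

static void *demo_lookup(unsigned long index)
{
	/* Replaces rcu_read_lock() + radix_tree_lookup(): xa_load() is RCU-safe. */
	return xa_load(&demo_buffers, index);
}

static void demo_erase(unsigned long index)
{
	/* Replaces spin_lock() + radix_tree_delete(): xa_erase() locks internally. */
	xa_erase(&demo_buffers, index);
}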

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Gabriel Niebler <gniebler@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/tests/btrfs-tests.c

diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 9eebd96c663992ee486806f9a8fe044d3cfe920c..fb299fe53a89dd4c73d168cf75797b61f58c1e78 100644
@@ -994,10 +994,10 @@ struct btrfs_fs_info {
 
        struct btrfs_delayed_root *delayed_root;
 
-       /* Extent buffer radix tree */
+       /* Extent buffer xarray */
        spinlock_t buffer_lock;
        /* Entries are eb->start / sectorsize */
-       struct radix_tree_root buffer_radix;
+       struct xarray extent_buffers;
 
        /* next backup root to be overwritten */
        int backup_root_index;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 15eccd8f3b1bc3057cd6713adca23442763d9abd..6b43373d2d853033e85dd1f5781e4886467e0e4b 100644
@@ -486,7 +486,7 @@ static int csum_dirty_subpage_buffers(struct btrfs_fs_info *fs_info,
                uptodate = btrfs_subpage_test_uptodate(fs_info, page, cur,
                                                       fs_info->nodesize);
 
-               /* A dirty eb shouldn't disappear from buffer_radix */
+               /* A dirty eb shouldn't disappear from extent_buffers */
                if (WARN_ON(!eb))
                        return -EUCLEAN;
 
@@ -3151,7 +3151,7 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
 void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
 {
        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
-       INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
+       xa_init_flags(&fs_info->extent_buffers, GFP_ATOMIC);
        INIT_LIST_HEAD(&fs_info->trans_list);
        INIT_LIST_HEAD(&fs_info->dead_roots);
        INIT_LIST_HEAD(&fs_info->delayed_iputs);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 07888cce3bce637b7a8a1dc9c8d25a55f021a42f..66636e43e33950236ea5b9ac1379300ed279c59d 100644
@@ -2965,7 +2965,7 @@ static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
 }
 
 /*
- * Find extent buffer for a givne bytenr.
+ * Find extent buffer for a given bytenr.
  *
  * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking
  * in endio context.
@@ -2984,11 +2984,9 @@ static struct extent_buffer *find_extent_buffer_readpage(
                return (struct extent_buffer *)page->private;
        }
 
-       /* For subpage case, we need to lookup buffer radix tree */
-       rcu_read_lock();
-       eb = radix_tree_lookup(&fs_info->buffer_radix,
-                              bytenr >> fs_info->sectorsize_bits);
-       rcu_read_unlock();
+       /* For subpage case, we need to lookup extent buffer xarray */
+       eb = xa_load(&fs_info->extent_buffers,
+                    bytenr >> fs_info->sectorsize_bits);
        ASSERT(eb);
        return eb;
 }
@@ -4447,8 +4445,8 @@ static struct extent_buffer *find_extent_buffer_nolock(
        struct extent_buffer *eb;
 
        rcu_read_lock();
-       eb = radix_tree_lookup(&fs_info->buffer_radix,
-                              start >> fs_info->sectorsize_bits);
+       eb = xa_load(&fs_info->extent_buffers,
+                    start >> fs_info->sectorsize_bits);
        if (eb && atomic_inc_not_zero(&eb->refs)) {
                rcu_read_unlock();
                return eb;
@@ -6141,24 +6139,22 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
        if (!eb)
                return ERR_PTR(-ENOMEM);
        eb->fs_info = fs_info;
-again:
-       ret = radix_tree_preload(GFP_NOFS);
-       if (ret) {
-               exists = ERR_PTR(ret);
-               goto free_eb;
-       }
-       spin_lock(&fs_info->buffer_lock);
-       ret = radix_tree_insert(&fs_info->buffer_radix,
-                               start >> fs_info->sectorsize_bits, eb);
-       spin_unlock(&fs_info->buffer_lock);
-       radix_tree_preload_end();
-       if (ret == -EEXIST) {
-               exists = find_extent_buffer(fs_info, start);
-               if (exists)
+
+       do {
+               ret = xa_insert(&fs_info->extent_buffers,
+                               start >> fs_info->sectorsize_bits,
+                               eb, GFP_NOFS);
+               if (ret == -ENOMEM) {
+                       exists = ERR_PTR(ret);
                        goto free_eb;
-               else
-                       goto again;
-       }
+               }
+               if (ret == -EBUSY) {
+                       exists = find_extent_buffer(fs_info, start);
+                       if (exists)
+                               goto free_eb;
+               }
+       } while (ret);
+
        check_buffer_tree_ref(eb);
        set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
 
@@ -6333,25 +6329,22 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
        }
        if (uptodate)
                set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
-again:
-       ret = radix_tree_preload(GFP_NOFS);
-       if (ret) {
-               exists = ERR_PTR(ret);
-               goto free_eb;
-       }
-
-       spin_lock(&fs_info->buffer_lock);
-       ret = radix_tree_insert(&fs_info->buffer_radix,
-                               start >> fs_info->sectorsize_bits, eb);
-       spin_unlock(&fs_info->buffer_lock);
-       radix_tree_preload_end();
-       if (ret == -EEXIST) {
-               exists = find_extent_buffer(fs_info, start);
-               if (exists)
+
+       do {
+               ret = xa_insert(&fs_info->extent_buffers,
+                               start >> fs_info->sectorsize_bits,
+                               eb, GFP_NOFS);
+               if (ret == -ENOMEM) {
+                       exists = ERR_PTR(ret);
                        goto free_eb;
-               else
-                       goto again;
-       }
+               }
+               if (ret == -EBUSY) {
+                       exists = find_extent_buffer(fs_info, start);
+                       if (exists)
+                               goto free_eb;
+               }
+       } while (ret);
+
        /* add one reference for the tree */
        check_buffer_tree_ref(eb);
        set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
@@ -6396,10 +6389,8 @@ static int release_extent_buffer(struct extent_buffer *eb)
 
                        spin_unlock(&eb->refs_lock);
 
-                       spin_lock(&fs_info->buffer_lock);
-                       radix_tree_delete(&fs_info->buffer_radix,
-                                         eb->start >> fs_info->sectorsize_bits);
-                       spin_unlock(&fs_info->buffer_lock);
+                       xa_erase(&fs_info->extent_buffers,
+                                eb->start >> fs_info->sectorsize_bits);
                } else {
                        spin_unlock(&eb->refs_lock);
                }
@@ -7344,42 +7335,25 @@ void memmove_extent_buffer(const struct extent_buffer *dst,
        }
 }
 
-#define GANG_LOOKUP_SIZE       16
 static struct extent_buffer *get_next_extent_buffer(
                struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
 {
-       struct extent_buffer *gang[GANG_LOOKUP_SIZE];
-       struct extent_buffer *found = NULL;
+       struct extent_buffer *eb;
+       unsigned long index;
        u64 page_start = page_offset(page);
-       u64 cur = page_start;
 
        ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
        lockdep_assert_held(&fs_info->buffer_lock);
 
-       while (cur < page_start + PAGE_SIZE) {
-               int ret;
-               int i;
-
-               ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
-                               (void **)gang, cur >> fs_info->sectorsize_bits,
-                               min_t(unsigned int, GANG_LOOKUP_SIZE,
-                                     PAGE_SIZE / fs_info->nodesize));
-               if (ret == 0)
-                       goto out;
-               for (i = 0; i < ret; i++) {
-                       /* Already beyond page end */
-                       if (gang[i]->start >= page_start + PAGE_SIZE)
-                               goto out;
-                       /* Found one */
-                       if (gang[i]->start >= bytenr) {
-                               found = gang[i];
-                               goto out;
-                       }
-               }
-               cur = gang[ret - 1]->start + gang[ret - 1]->len;
+       xa_for_each_start(&fs_info->extent_buffers, index, eb,
+                         page_start >> fs_info->sectorsize_bits) {
+               if (in_range(eb->start, page_start, PAGE_SIZE))
+                       return eb;
+               else if (eb->start >= page_start + PAGE_SIZE)
+                       /* Already beyond page end */
+                       return NULL;
        }
-out:
-       return found;
+       return NULL;
 }
 
 static int try_release_subpage_extent_buffer(struct page *page)
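
The GANG_LOOKUP_SIZE loop removed above collapses into a single iteration because xa_for_each_start() already walks present entries in ascending index order from a starting index. A minimal sketch of that iteration pattern, using a hypothetical helper rather than btrfs code:

#include <linux/xarray.h>

/* Illustrative only: return the first entry with first <= index <= last. */
static void *demo_find_in_range(struct xarray *xa, unsigned long first,
				unsigned long last)
{
	unsigned long index;
	void *entry;

	/* Visits only present entries, in index order, starting at 'first'. */
	xa_for_each_start(xa, index, entry, first) {
		if (index > last)
			return NULL;	/* already beyond the range */
		return entry;		/* first entry inside the range */
	}
	return NULL;
}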
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index d8e56edd69910d66494b1113d5793ca557229f62..c8c4efc9a3fb2ddeeb1899ca1b52c9ff883b7485 100644
@@ -150,8 +150,8 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
 
 void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
 {
-       struct radix_tree_iter iter;
-       void **slot;
+       unsigned long index;
+       struct extent_buffer *eb;
        struct btrfs_device *dev, *tmp;
 
        if (!fs_info)
@@ -163,25 +163,9 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
 
        test_mnt->mnt_sb->s_fs_info = NULL;
 
-       spin_lock(&fs_info->buffer_lock);
-       radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {
-               struct extent_buffer *eb;
-
-               eb = radix_tree_deref_slot_protected(slot, &fs_info->buffer_lock);
-               if (!eb)
-                       continue;
-               /* Shouldn't happen but that kind of thinking creates CVE's */
-               if (radix_tree_exception(eb)) {
-                       if (radix_tree_deref_retry(eb))
-                               slot = radix_tree_iter_retry(&iter);
-                       continue;
-               }
-               slot = radix_tree_iter_resume(slot, &iter);
-               spin_unlock(&fs_info->buffer_lock);
+       xa_for_each(&fs_info->extent_buffers, index, eb) {
                free_extent_buffer_stale(eb);
-               spin_lock(&fs_info->buffer_lock);
        }
-       spin_unlock(&fs_info->buffer_lock);
 
        btrfs_mapping_tree_free(&fs_info->mapping_tree);
        list_for_each_entry_safe(dev, tmp, &fs_info->fs_devices->devices,