git.baikalelectronics.ru Git - kernel.git/commitdiff
btrfs: raid56: make raid56_add_scrub_pages() subpage compatible
authorQu Wenruo <wqu@suse.com>
Fri, 1 Apr 2022 11:23:26 +0000 (19:23 +0800)
committerDavid Sterba <dsterba@suse.com>
Mon, 16 May 2022 15:03:15 +0000 (17:03 +0200)
Making the function subpage compatible requires one extra parameter, @pgoff.

In the current code base, scrub is still one page per sector, thus the
new parameter will always be 0.

To take full advantage of this, the extra subpage scrub optimization code
is still needed.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/raid56.c
fs/btrfs/raid56.h
fs/btrfs/scrub.c

index 84eb4890eea1564c69831eeb8220d8210a47e321..1f310bd381a8d1bbf2aeb36e0b55ca7e50ae6ab0 100644 (file)
@@ -2381,17 +2381,19 @@ struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
 
 /* Used for both parity scrub and missing. */
 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
-                           u64 logical)
+                           unsigned int pgoff, u64 logical)
 {
+       const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
        int stripe_offset;
        int index;
 
        ASSERT(logical >= rbio->bioc->raid_map[0]);
-       ASSERT(logical + PAGE_SIZE <= rbio->bioc->raid_map[0] +
+       ASSERT(logical + sectorsize <= rbio->bioc->raid_map[0] +
                                rbio->stripe_len * rbio->nr_data);
        stripe_offset = (int)(logical - rbio->bioc->raid_map[0]);
-       index = stripe_offset >> PAGE_SHIFT;
-       rbio->bio_pages[index] = page;
+       index = stripe_offset / sectorsize;
+       rbio->bio_sectors[index].page = page;
+       rbio->bio_sectors[index].pgoff = pgoff;
 }
 
 /*
index 006b4741e5c13fa96fecb959120cda054d408f5f..aaad08aefd7d085a984a8e1458bffcf52153fc33 100644 (file)
@@ -35,7 +35,7 @@ int raid56_parity_recover(struct bio *bio, struct btrfs_io_context *bioc,
 int raid56_parity_write(struct bio *bio, struct btrfs_io_context *bioc, u32 stripe_len);
 
 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
-                           u64 logical);
+                           unsigned int pgoff, u64 logical);
 
 struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
                                struct btrfs_io_context *bioc, u32 stripe_len,
index b79a3221d7afccad3e974c699a221a283cd14081..6ac711fa793cab9c8eed0661cf0472e39f5872d3 100644 (file)
@@ -2205,7 +2205,11 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
        for (i = 0; i < sblock->sector_count; i++) {
                struct scrub_sector *sector = sblock->sectors[i];
 
-               raid56_add_scrub_pages(rbio, sector->page, sector->logical);
+               /*
+                * For now, our scrub is still one page per sector, so pgoff
+                * is always 0.
+                */
+               raid56_add_scrub_pages(rbio, sector->page, 0, sector->logical);
        }
 
        btrfs_init_work(&sblock->work, scrub_missing_raid56_worker, NULL, NULL);