btrfs: extend btrfs_cleanup_ordered_extents for NULL locked_page
author Naohiro Aota <naohiro.aota@wdc.com>
Tue, 21 Jun 2022 06:41:00 +0000 (15:41 +0900)
committer David Sterba <dsterba@suse.com>
Mon, 25 Jul 2022 15:45:38 +0000 (17:45 +0200)
btrfs_cleanup_ordered_extents() assumes locked_page to be non-NULL, so
it is not usable for submit_uncompressed_range(), which can have a NULL
locked_page.

Add support for the locked_page == NULL case. Also, rewrite the
redundant "page_offset(locked_page)" calls to use the cached page_start.

Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/inode.c

index 5a58042a404b9af3703f5ac52f98bf0556022351..11ff5bb40153520cb78a2848ea16a7e5caa9f943 100644 (file)
@@ -190,11 +190,14 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
 {
        unsigned long index = offset >> PAGE_SHIFT;
        unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
-       u64 page_start = page_offset(locked_page);
-       u64 page_end = page_start + PAGE_SIZE - 1;
-
+       u64 page_start, page_end;
        struct page *page;
 
+       if (locked_page) {
+               page_start = page_offset(locked_page);
+               page_end = page_start + PAGE_SIZE - 1;
+       }
+
        while (index <= end_index) {
                /*
                 * For locked page, we will call end_extent_writepage() on it
@@ -207,7 +210,7 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
                 * btrfs_mark_ordered_io_finished() would skip the accounting
                 * for the page range, and the ordered extent will never finish.
                 */
-               if (index == (page_offset(locked_page) >> PAGE_SHIFT)) {
+               if (locked_page && index == (page_start >> PAGE_SHIFT)) {
                        index++;
                        continue;
                }
@@ -226,17 +229,20 @@ static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
                put_page(page);
        }
 
-       /* The locked page covers the full range, nothing needs to be done */
-       if (bytes + offset <= page_offset(locked_page) + PAGE_SIZE)
-               return;
-       /*
-        * In case this page belongs to the delalloc range being instantiated
-        * then skip it, since the first page of a range is going to be
-        * properly cleaned up by the caller of run_delalloc_range
-        */
-       if (page_start >= offset && page_end <= (offset + bytes - 1)) {
-               bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE;
-               offset = page_offset(locked_page) + PAGE_SIZE;
+       if (locked_page) {
+               /* The locked page covers the full range, nothing needs to be done */
+               if (bytes + offset <= page_start + PAGE_SIZE)
+                       return;
+               /*
+                * In case this page belongs to the delalloc range being
+                * instantiated then skip it, since the first page of a range is
+                * going to be properly cleaned up by the caller of
+                * run_delalloc_range
+                */
+               if (page_start >= offset && page_end <= (offset + bytes - 1)) {
+                       bytes = offset + bytes - page_start - PAGE_SIZE;
+                       offset = page_start + PAGE_SIZE;
+               }
        }
 
        return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);