btrfs: zero out left over bytes after processing compression streams
author     Chris Mason <clm@fb.com>
           Sun, 30 Nov 2014 13:56:33 +0000 (08:56 -0500)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 30 Nov 2014 17:33:51 +0000 (09:33 -0800)
Don Bailey noticed that our page zeroing for compression at end-io time
isn't complete.  This reworks a patch from Linus to push the zeroing
into the zlib and lzo specific functions instead of trying to handle the
corners inside btrfs_decompress_buf2page.

Signed-off-by: Chris Mason <clm@fb.com>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Reported-by: Don A. Bailey <donb@securitymouse.com>
cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/btrfs/compression.c
fs/btrfs/compression.h
fs/btrfs/lzo.c
fs/btrfs/zlib.c

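The overall effect of the patch, sketched as a small userspace analogue (struct out_vec and clear_vec_end are simplified, hypothetical stand-ins, not the kernel types): once the decompressed stream has been copied into the output vector up to some page index and offset, everything that remains is zeroed in one pass, rather than relying on the copy loop in btrfs_decompress_buf2page to catch the last-page corner case.

#include <string.h>

/* simplified stand-in for one bio_vec entry */
struct out_vec {
	unsigned char *data;
	unsigned long off;
	unsigned long len;
};

/*
 * Zero everything in the output vector from (pg_index, pg_offset) to the
 * end.  This mirrors what btrfs_clear_biovec_end() does, minus the
 * kmap_atomic() handling needed for real pages.
 */
static void clear_vec_end(struct out_vec *vec, int vcnt,
			  unsigned long pg_index, unsigned long pg_offset)
{
	while (pg_index < (unsigned long)vcnt) {
		unsigned long off = vec[pg_index].off;
		unsigned long len = vec[pg_index].len;

		if (pg_offset < off)
			pg_offset = off;
		if (pg_offset < off + len)
			memset(vec[pg_index].data + pg_offset, 0,
			       off + len - pg_offset);
		pg_index++;
		pg_offset = 0;
	}
}

In the hunks below, zlib.c and lzo.c call the real helper only when decompression succeeded (the !ret checks), passing the last page index and offset that were actually filled.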
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d3220d31d3cbf0e653898d15816f5895045c06d5..dcd9be32ac579451597dcf61e97b54631a69aa68 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1011,8 +1011,6 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
                bytes = min(bytes, working_bytes);
                kaddr = kmap_atomic(page_out);
                memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
-               if (*pg_index == (vcnt - 1) && *pg_offset == 0)
-                       memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
                kunmap_atomic(kaddr);
                flush_dcache_page(page_out);
 
@@ -1054,3 +1052,34 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 
        return 1;
 }
+
+/*
+ * When uncompressing data, we need to make sure and zero any parts of
+ * the biovec that were not filled in by the decompression code.  pg_index
+ * and pg_offset indicate the last page and the last offset of that page
+ * that have been filled in.  This will zero everything remaining in the
+ * biovec.
+ */
+void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
+                                  unsigned long pg_index,
+                                  unsigned long pg_offset)
+{
+       while (pg_index < vcnt) {
+               struct page *page = bvec[pg_index].bv_page;
+               unsigned long off = bvec[pg_index].bv_offset;
+               unsigned long len = bvec[pg_index].bv_len;
+
+               if (pg_offset < off)
+                       pg_offset = off;
+               if (pg_offset < off + len) {
+                       unsigned long bytes = off + len - pg_offset;
+                       char *kaddr;
+
+                       kaddr = kmap_atomic(page);
+                       memset(kaddr + pg_offset, 0, bytes);
+                       kunmap_atomic(kaddr);
+               }
+               pg_index++;
+               pg_offset = 0;
+       }
+}
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 0c803b4fbf93dc8062e644952abb7f36ff0e8504..d181f70caae01471ca80e181818a833b41f057de 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -45,7 +45,9 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                  unsigned long nr_pages);
 int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags);
-
+void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
+                                  unsigned long pg_index,
+                                  unsigned long pg_offset);
 struct btrfs_compress_op {
        struct list_head *(*alloc_workspace)(void);
 
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 78285f30909edd09f19cc6eb48985d47eed3c565..617553cdb7d3b36b1b8ca6a87b28526c5a060b4d 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -373,6 +373,8 @@ cont:
        }
 done:
        kunmap(pages_in[page_in_index]);
+       if (!ret)
+               btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
        return ret;
 }
 
@@ -410,10 +412,23 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
                goto out;
        }
 
+       /*
+        * the caller is already checking against PAGE_SIZE, but lets
+        * move this check closer to the memcpy/memset
+        */
+       destlen = min_t(unsigned long, destlen, PAGE_SIZE);
        bytes = min_t(unsigned long, destlen, out_len - start_byte);
 
        kaddr = kmap_atomic(dest_page);
        memcpy(kaddr, workspace->buf + start_byte, bytes);
+
+       /*
+        * btrfs_getblock is doing a zero on the tail of the page too,
+        * but this will cover anything missing from the decompressed
+        * data.
+        */
+       if (bytes < destlen)
+               memset(kaddr+bytes, 0, destlen-bytes);
        kunmap_atomic(kaddr);
 out:
        return ret;
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 759fa4e2de8fec28d3f6448456e1e12cec1add17..fb22fd8d8fb8fad73cb4d2b63d4d525da3eea876 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -299,6 +299,8 @@ done:
        zlib_inflateEnd(&workspace->strm);
        if (data_in)
                kunmap(pages_in[page_in_index]);
+       if (!ret)
+               btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
        return ret;
 }
 
@@ -310,10 +312,14 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        int ret = 0;
        int wbits = MAX_WBITS;
-       unsigned long bytes_left = destlen;
+       unsigned long bytes_left;
        unsigned long total_out = 0;
+       unsigned long pg_offset = 0;
        char *kaddr;
 
+       destlen = min_t(unsigned long, destlen, PAGE_SIZE);
+       bytes_left = destlen;
+
        workspace->strm.next_in = data_in;
        workspace->strm.avail_in = srclen;
        workspace->strm.total_in = 0;
@@ -341,7 +347,6 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
                unsigned long buf_start;
                unsigned long buf_offset;
                unsigned long bytes;
-               unsigned long pg_offset = 0;
 
                ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
                if (ret != Z_OK && ret != Z_STREAM_END)
@@ -384,6 +389,17 @@ next:
                ret = 0;
 
        zlib_inflateEnd(&workspace->strm);
+
+       /*
+        * this should only happen if zlib returned fewer bytes than we
+        * expected.  btrfs_get_block is responsible for zeroing from the
+        * end of the inline extent (destlen) to the end of the page
+        */
+       if (pg_offset < destlen) {
+               kaddr = kmap_atomic(dest_page);
+               memset(kaddr + pg_offset, 0, destlen - pg_offset);
+               kunmap_atomic(kaddr);
+       }
        return ret;
 }
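For the zlib inline-extent path the same tail handling applies, except the data is inflated directly into the destination page and pg_offset records how far the stream got. A rough userspace sketch using the regular zlib library API (inflate_inline is a hypothetical name; the kernel uses its own zlib_inflate wrappers and kmap_atomic on the page):

#include <string.h>
#include <zlib.h>

#define PAGE_SIZE 4096UL

static int inflate_inline(const unsigned char *src, unsigned int srclen,
			  unsigned char *page, unsigned long destlen)
{
	z_stream strm;
	unsigned long produced;
	int ret;

	/* mirror the new clamp: never write past one page */
	if (destlen > PAGE_SIZE)
		destlen = PAGE_SIZE;

	memset(&strm, 0, sizeof(strm));
	if (inflateInit2(&strm, MAX_WBITS) != Z_OK)
		return -1;

	strm.next_in = (unsigned char *)src;
	strm.avail_in = srclen;
	strm.next_out = page;
	strm.avail_out = (unsigned int)destlen;

	ret = inflate(&strm, Z_FINISH);
	produced = strm.total_out;
	inflateEnd(&strm);

	/* a short or truncated stream is tolerated; the gap is zeroed below */
	if (ret != Z_STREAM_END && ret != Z_OK && ret != Z_BUF_ERROR)
		return -1;

	/*
	 * zlib may have produced fewer bytes than expected; clear the rest
	 * of the requested range so no stale memory shows up as file data.
	 */
	if (produced < destlen)
		memset(page + produced, 0, destlen - produced);
	return 0;
}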