cifs: Fix memory leak when using fscache
author    Matthew Wilcox (Oracle) <willy@infradead.org>
          Mon, 18 Jul 2022 19:06:24 +0000 (20:06 +0100)
committer Steve French <stfrench@microsoft.com>
          Mon, 1 Aug 2022 06:34:44 +0000 (01:34 -0500)
If we hit the 'index == next_cached' case, we leak a refcount on the
struct page.  Fix this by using readahead_folio() which takes care of
the refcount for you.

Fixes: bb7c2f5d2e9d ("cifs: Implement cache I/O by accessing the cache directly")
Cc: David Howells <dhowells@redhat.com>
Cc: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Steve French <stfrench@microsoft.com>
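
For reference, a minimal sketch of the two APIs' refcount contracts, not the actual cifs code: example_readahead() and read_one() are hypothetical names, with read_one() standing in for a per-page read such as cifs_readpage_from_fscache().

    #include <linux/pagemap.h>

    /* Hypothetical per-page read; a stand-in for cifs_readpage_from_fscache(). */
    static int read_one(struct page *page)
    {
            /* issue and wait for the read here */
            return 0;
    }

    static void example_readahead(struct readahead_control *ractl)
    {
            struct folio *folio;

            /*
             * Old pattern: readahead_page() hands back the page with an
             * extra reference that the caller must drop:
             *
             *      struct page *page = readahead_page(ractl);
             *      read_one(page);
             *      unlock_page(page);
             *      put_page(page);         <- easy to forget; this is the leak
             */

            /*
             * New pattern: readahead_folio() drops that reference itself,
             * so the caller only unlocks the folio once the I/O is done.
             */
            while ((folio = readahead_folio(ractl)) != NULL) {
                    read_one(&folio->page);
                    folio_unlock(folio);
            }
    }

Passing &folio->page keeps the existing page-based cifs_readpage_from_fscache() interface unchanged; only the refcount handling and the batch-size accounting (folio_nr_pages() instead of 1 << thp_order()) move to the folio APIs, as the diff below shows.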
fs/cifs/file.c

index e64cda7a761012563a82dc2afbd2582e01ea2607..6985710e14c28bf6c1a22bb1cab62028d225bffd 100644 (file)
@@ -4459,10 +4459,10 @@ static void cifs_readahead(struct readahead_control *ractl)
                                 * TODO: Send a whole batch of pages to be read
                                 * by the cache.
                                 */
-                               page = readahead_page(ractl);
-                               last_batch_size = 1 << thp_order(page);
+                               struct folio *folio = readahead_folio(ractl);
+                               last_batch_size = folio_nr_pages(folio);
                                if (cifs_readpage_from_fscache(ractl->mapping->host,
-                                                              page) < 0) {
+                                                              &folio->page) < 0) {
                                        /*
                                         * TODO: Deal with cache read failure
                                         * here, but for the moment, delegate
@@ -4470,7 +4470,7 @@ static void cifs_readahead(struct readahead_control *ractl)
                                         */
                                        caching = false;
                                }
-                               unlock_page(page);
+                               folio_unlock(folio);
                                next_cached++;
                                cache_nr_pages--;
                                if (cache_nr_pages == 0)