netfs: Fix missing xas_retry() calls in xarray iteration
author    David Howells <dhowells@redhat.com>
          Thu, 3 Nov 2022 16:08:14 +0000 (16:08 +0000)
committer David Howells <dhowells@redhat.com>
          Tue, 15 Nov 2022 16:49:06 +0000 (16:49 +0000)
netfslib has a number of places in which it iterates over an xarray
whilst holding the RCU read lock.  It *should* call xas_retry() as the
first thing inside the loop and "continue" if it returns true, in case
the xarray walker handed back a special value indicating that the walk
needs to be restarted from the root[*].

Fix this by adding the missing retry checks.
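
As an illustration only - this is not part of the patch and uses generic
placeholder identifiers rather than netfslib's own - the corrected loop
shape looks roughly like this:

    #include <linux/pagemap.h>
    #include <linux/xarray.h>

    /* Sketch: walk mapping->i_pages from 'first' to 'last' under RCU. */
    static void example_walk(struct address_space *mapping,
                             pgoff_t first, pgoff_t last)
    {
            XA_STATE(xas, &mapping->i_pages, first);
            struct folio *folio;

            rcu_read_lock();
            xas_for_each(&xas, folio, last) {
                    /* Must come first: 'folio' may be a special retry or
                     * zero entry rather than a real pointer, so it must
                     * not be dereferenced before this check.
                     */
                    if (xas_retry(&xas, folio))
                            continue;

                    /* ... folio_index(folio), folio_size(folio), etc. are
                     * now safe to call ...
                     */
            }
            rcu_read_unlock();
    }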

[*] I wonder if this should be done inside xas_find(), xas_next_node()
    and suchlike, but I'm told that's not a simple change to effect.

This can cause an oops like the one below.  Note the faulting address -
this is an internal value (tagged with 0x2 in its low bits) handed back
by the xarray, not a pointer to a real folio.
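
As an aside, and going on my reading of include/linux/xarray.h rather than
anything stated in the patch: 0x402 is exactly what the retry entry encodes
to, which is why treating it as a folio pointer faults at that address:

    /* Paraphrased from include/linux/xarray.h: internal entries carry 0b10
     * in their low two bits, and the retry entry wraps the value 256.
     */
    static inline void *xa_mk_internal(unsigned long v)
    {
            return (void *)((v << 2) | 2);
    }

    #define XA_RETRY_ENTRY  xa_mk_internal(256)     /* == (void *)0x402 */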

BUG: kernel NULL pointer dereference, address: 0000000000000402
...
RIP: 0010:netfs_rreq_unlock+0xef/0x380 [netfs]
...
Call Trace:
 netfs_rreq_assess+0xa6/0x240 [netfs]
 netfs_readpage+0x173/0x3b0 [netfs]
 ? init_wait_var_entry+0x50/0x50
 filemap_read_page+0x33/0xf0
 filemap_get_pages+0x2f2/0x3f0
 filemap_read+0xaa/0x320
 ? do_filp_open+0xb2/0x150
 ? rmqueue+0x3be/0xe10
 ceph_read_iter+0x1fe/0x680 [ceph]
 ? new_sync_read+0x115/0x1a0
 new_sync_read+0x115/0x1a0
 vfs_read+0xf3/0x180
 ksys_read+0x5f/0xe0
 do_syscall_64+0x38/0x90
 entry_SYSCALL_64_after_hwframe+0x44/0xae

Changes:
========
ver #2)
 - Changed an unsigned int to a size_t to reduce the likelihood of an
   overflow as per Willy's suggestion.
 - Added an additional patch to fix the maths.

Fixes: 3d3c95046742 ("netfs: Provide readahead and readpage netfs helpers")
Reported-by: George Law <glaw@redhat.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: Jingbo Xu <jefflexu@linux.alibaba.com>
cc: Matthew Wilcox <willy@infradead.org>
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/166749229733.107206.17482609105741691452.stgit@warthog.procyon.org.uk/
Link: https://lore.kernel.org/r/166757987929.950645.12595273010425381286.stgit@warthog.procyon.org.uk/
fs/netfs/buffered_read.c
fs/netfs/io.c

diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index 0ce5358521510694f21b9dd1133081e182593be7..baf668fb431541f11f59086389326767f6e6608a 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -46,10 +46,15 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 
        rcu_read_lock();
        xas_for_each(&xas, folio, last_page) {
-               unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
-               unsigned int pgend = pgpos + folio_size(folio);
+               unsigned int pgpos, pgend;
                bool pg_failed = false;
 
+               if (xas_retry(&xas, folio))
+                       continue;
+
+               pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
+               pgend = pgpos + folio_size(folio);
+
                for (;;) {
                        if (!subreq) {
                                pg_failed = true;
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index 4289258992826bd298323fde25592209dcbfaad3..e374767d1b6832745e9c59249d426c331669f95b 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -121,6 +121,9 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
                XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
 
                xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
+                       if (xas_retry(&xas, folio))
+                               continue;
+
                        /* We might have multiple writes from the same huge
                         * folio, but we mustn't unlock a folio more than once.
                         */
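
For reference, and not part of this change: as I read include/linux/xarray.h
around this kernel version, xas_retry() skips zero entries and, on a retry
entry, resets the walk state, which is why a bare "continue" after it is
enough to restart the iteration safely:

    static inline bool xas_retry(struct xa_state *xas, const void *entry)
    {
            if (xa_is_zero(entry))
                    return true;
            if (!xa_is_retry(entry))
                    return false;
            xas_reset(xas);         /* next iteration walks from the root again */
            return true;
    }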