zsmalloc: fix races between asynchronous zspage free and page migration
author    Sultan Alsawaf <sultan@kerneltoast.com>
          Fri, 13 May 2022 22:11:26 +0000 (15:11 -0700)
committer Andrew Morton <akpm@linux-foundation.org>
          Fri, 13 May 2022 22:11:26 +0000 (15:11 -0700)
The asynchronous zspage free worker tries to lock a zspage's entire page
list without defending against page migration.  Since pages which haven't
yet been locked can concurrently migrate off the zspage page list while
lock_zspage() churns away, lock_zspage() can suffer from a few different
lethal races.

It can lock a page which no longer belongs to the zspage and unsafely
dereference page_private(), it can unsafely dereference a torn pointer to
the next page (since there's a data race), and it can observe a spurious
NULL pointer to the next page and thus not lock all of the zspage's pages
(since a single page migration will reconstruct the entire page list, and
create_page_chain() unconditionally zeroes out each list pointer in the
process).
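
For illustration (not part of the patch), here is the pre-fix loop from
the diff below, annotated with where each of those races can bite:

	do {
		/*
		 * "page" may have already migrated off this zspage, so we
		 * can lock the wrong page here and then unsafely read a
		 * stale next-page pointer from it.
		 */
		lock_page(page);
		/*
		 * The next-page pointer is read without synchronization: it
		 * can be observed torn, or as a spurious NULL freshly
		 * written by create_page_chain(), ending the loop before
		 * every page in the zspage has been locked.
		 */
	} while ((page = get_next_page(page)) != NULL);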

Fix the races by using migrate_read_lock() in lock_zspage() to synchronize
with page migration.
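
Why this is sufficient (a sketch, not part of this patch): the migration
path in mm/zsmalloc.c only rebuilds the page list while holding the
zspage's migrate write lock, so a reader holding migrate_read_lock() sees
a stable first page and stable next-page pointers.  Roughly, on the
writer side in zs_page_migrate():

	migrate_write_lock(zspage);
	/*
	 * ... replace the old page with the new one and rebuild the
	 * page list via create_page_chain() ...
	 */
	migrate_write_unlock(zspage);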

Link: https://lkml.kernel.org/r/20220509024703.243847-1-sultan@kerneltoast.com
Fixes: a448c30efb2716 ("zsmalloc: zs_page_migrate: skip unnecessary loops but not return -EBUSY if zspage is not inuse")
Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 9152fbde33b50f93a4f8ef949caebd6d44c5ffcf..5d5fc04385b8d78ab70ae3c8d7d276e876030352 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1718,11 +1718,40 @@ static enum fullness_group putback_zspage(struct size_class *class,
  */
 static void lock_zspage(struct zspage *zspage)
 {
-       struct page *page = get_first_page(zspage);
+       struct page *curr_page, *page;
 
-       do {
-               lock_page(page);
-       } while ((page = get_next_page(page)) != NULL);
+       /*
+        * Pages we haven't locked yet can be migrated off the list while we're
+        * trying to lock them, so we need to be careful and only attempt to
+        * lock each page under migrate_read_lock(). Otherwise, the page we lock
+        * may no longer belong to the zspage. This means that we may wait for
+        * the wrong page to unlock, so we must take a reference to the page
+        * prior to waiting for it to unlock outside migrate_read_lock().
+        */
+       while (1) {
+               migrate_read_lock(zspage);
+               page = get_first_page(zspage);
+               if (trylock_page(page))
+                       break;
+               get_page(page);
+               migrate_read_unlock(zspage);
+               wait_on_page_locked(page);
+               put_page(page);
+       }
+
+       curr_page = page;
+       while ((page = get_next_page(curr_page))) {
+               if (trylock_page(page)) {
+                       curr_page = page;
+               } else {
+                       get_page(page);
+                       migrate_read_unlock(zspage);
+                       wait_on_page_locked(page);
+                       put_page(page);
+                       migrate_read_lock(zspage);
+               }
+       }
+       migrate_read_unlock(zspage);
 }
 
 static int zs_init_fs_context(struct fs_context *fc)