mm: swapoff: take notice of completion sooner
author Hugh Dickins <hughd@google.com>
Fri, 19 Apr 2019 00:50:09 +0000 (17:50 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 19 Apr 2019 16:46:04 +0000 (09:46 -0700)
The old try_to_unuse() implementation was driven by find_next_to_unuse(),
which terminated as soon as all the swap had been freed.

Add inuse_pages checks now (alongside signal_pending()) to stop scanning
mms and swap_map once finished.

The same ought to be done in shmem_unuse() too, but never was before,
and needs a different interface: so leave it as is for now.
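For illustration, a minimal userspace analogue of the loop-guard pattern this patch applies: re-check a remaining-work counter and a pending-interruption flag at the top of each scan iteration. The names inuse_pages, stop_requested and scan_one_map() below are hypothetical stand-ins for the kernel's si->inuse_pages, signal_pending(current) and the mm/swap_map scans; this is a sketch of the idea, not the kernel code.

#include <signal.h>
#include <stdio.h>

/* Hypothetical stand-in for signal_pending(current): set from SIGINT. */
static volatile sig_atomic_t stop_requested;

static void on_sigint(int sig)
{
	(void)sig;
	stop_requested = 1;
}

/* Hypothetical stand-in for si->inuse_pages: work still outstanding. */
static unsigned int inuse_pages = 8;

/* Hypothetical scan step: frees at most one entry per call. */
static unsigned int scan_one_map(unsigned int map)
{
	printf("scanning map %u, %u pages still in use\n", map, inuse_pages);
	return inuse_pages ? 1 : 0;
}

int main(void)
{
	unsigned int map = 0;

	signal(SIGINT, on_sigint);

	/*
	 * As in the patched try_to_unuse() loops: re-check the completion
	 * counter and the pending-interruption flag on every iteration, so
	 * scanning stops as soon as all work is done or the user has asked
	 * to stop, rather than only at the end of a full pass.
	 */
	while (inuse_pages && !stop_requested)
		inuse_pages -= scan_one_map(map++);

	if (inuse_pages && stop_requested) {
		/* Mirrors the patch reporting -EINTR when interrupted early. */
		fprintf(stderr, "interrupted with %u pages still in use\n",
			inuse_pages);
		return 1;
	}
	return 0;
}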

Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1904081258200.1523@eggly.anvils
Fixes: e4b318d7e2c0 ("mm: rid swapoff of quadratic complexity")
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: "Alex Xu (Hello71)" <alex_y_xu@yahoo.ca>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Kelley Nielsen <kelleynnn@gmail.com>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vineeth Pillai <vpillai@digitalocean.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/swapfile.c

index bf4ef2e40f23dc3285ddd77478bcce74cb09396b..71383625a582c47dd87281655329b6eeada87121 100644
@@ -2051,11 +2051,9 @@ retry:
 
        spin_lock(&mmlist_lock);
        p = &init_mm.mmlist;
-       while ((p = p->next) != &init_mm.mmlist) {
-               if (signal_pending(current)) {
-                       retval = -EINTR;
-                       break;
-               }
+       while (si->inuse_pages &&
+              !signal_pending(current) &&
+              (p = p->next) != &init_mm.mmlist) {
 
                mm = list_entry(p, struct mm_struct, mmlist);
                if (!mmget_not_zero(mm))
@@ -2082,7 +2080,9 @@ retry:
        mmput(prev_mm);
 
        i = 0;
-       while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
+       while (si->inuse_pages &&
+              !signal_pending(current) &&
+              (i = find_next_to_unuse(si, i, frontswap)) != 0) {
 
                entry = swp_entry(type, i);
                page = find_get_page(swap_address_space(entry), i);
@@ -2123,8 +2123,11 @@ retry:
         * separate lists, and wait for those lists to be emptied; but it's
         * easier and more robust (though cpu-intensive) just to keep retrying.
         */
-       if (si->inuse_pages)
-               goto retry;
+       if (si->inuse_pages) {
+               if (!signal_pending(current))
+                       goto retry;
+               retval = -EINTR;
+       }
 out:
        return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval;
 }