mm/vmscan: __isolate_lru_page_prepare() cleanup
author    Alex Shi <alex.shi@linux.alibaba.com>
          Wed, 24 Feb 2021 20:08:01 +0000 (12:08 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Wed, 24 Feb 2021 21:38:33 +0000 (13:38 -0800)
The function returns only two results, so using a 'switch' to handle its
return value is unnecessary.  Also simplify it to a bool function, as
Vlastimil suggested.

Also remove the 'goto' by reusing list_move(), and take Matthew Wilcox's
suggestion to update the comments in the function.
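
As an illustration of the pattern, here is a minimal user-space sketch
(hypothetical names, not the kernel code): an errno-style predicate
becomes a bool, and the caller's 'switch' plus 'goto' collapses into a
plain 'if' with 'continue':

	#include <stdbool.h>
	#include <stdio.h>

	/* Before: returned 0 on success and -EBUSY on failure.
	 * After: a bool predicate, in the spirit of the reworked
	 * __isolate_lru_page_prepare(). */
	static bool may_take(int flags, int mode)
	{
		if (!(flags & mode))
			return false;	/* was: return -EBUSY; */
		return true;		/* was: return 0; */
	}

	int main(void)
	{
		int items[] = { 1, 0, 3 };

		for (int i = 0; i < 3; i++) {
			/* was: switch (may_take(...)) with a 'busy:' label;
			 * 'continue' now replaces 'goto busy'. */
			if (!may_take(items[i], 1))
				continue;
			printf("took item %d\n", i);
		}
		return 0;
	}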

Link: https://lkml.kernel.org/r/728874d7-2d93-4049-68c1-dcc3b2d52ccd@linux.alibaba.com
Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/swap.h
mm/compaction.c
mm/vmscan.c

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 0d64a2bc7e001d554e5d4e0f2e0240deac5f24f2..32f665b1ee85c2b675ab2967f68e314007b90f6e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -356,7 +356,7 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page,
 extern unsigned long zone_reclaimable_pages(struct zone *zone);
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                        gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
+extern bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                                  unsigned long nr_pages,
                                                  gfp_t gfp_mask,
diff --git a/mm/compaction.c b/mm/compaction.c
index 190ccdaa6c192f641a456c15bd6623b85178abce..8f52532675a918125faab95e4bddb9c21f772649 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -988,7 +988,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                if (unlikely(!get_page_unless_zero(page)))
                        goto isolate_fail;
 
-               if (__isolate_lru_page_prepare(page, isolate_mode) != 0)
+               if (!__isolate_lru_page_prepare(page, isolate_mode))
                        goto isolate_fail_put;
 
                /* Try isolate the page */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b1b574ad199d2ca8bde196e73de0a129e8a61e43..04509994aed49329139138fc67d7fc3b64999e00 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1539,19 +1539,17 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
  * page:       page to consider
  * mode:       one of the LRU isolation modes defined above
  *
- * returns 0 on success, -ve errno on failure.
+ * returns true on success, false on failure.
  */
-int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
+bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
 {
-       int ret = -EBUSY;
-
        /* Only take pages on the LRU. */
        if (!PageLRU(page))
-               return ret;
+               return false;
 
        /* Compaction should not handle unevictable pages but CMA can do so */
        if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
-               return ret;
+               return false;
 
        /*
         * To minimise LRU disruption, the caller can indicate that it only
@@ -1564,7 +1562,7 @@ int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
        if (mode & ISOLATE_ASYNC_MIGRATE) {
                /* All the caller can do on PageWriteback is block */
                if (PageWriteback(page))
-                       return ret;
+                       return false;
 
                if (PageDirty(page)) {
                        struct address_space *mapping;
@@ -1580,20 +1578,20 @@ int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
                         * from the page cache.
                         */
                        if (!trylock_page(page))
-                               return ret;
+                               return false;
 
                        mapping = page_mapping(page);
                        migrate_dirty = !mapping || mapping->a_ops->migratepage;
                        unlock_page(page);
                        if (!migrate_dirty)
-                               return ret;
+                               return false;
                }
        }
 
        if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
-               return ret;
+               return false;
 
-       return 0;
+       return true;
 }
 
 /*
@@ -1677,35 +1675,31 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                 * only when the page is being freed somewhere else.
                 */
                scan += nr_pages;
-               switch (__isolate_lru_page_prepare(page, mode)) {
-               case 0:
-                       /*
-                        * Be careful not to clear PageLRU until after we're
-                        * sure the page is not being freed elsewhere -- the
-                        * page release code relies on it.
-                        */
-                       if (unlikely(!get_page_unless_zero(page)))
-                               goto busy;
-
-                       if (!TestClearPageLRU(page)) {
-                               /*
-                                * This page may in other isolation path,
-                                * but we still hold lru_lock.
-                                */
-                               put_page(page);
-                               goto busy;
-                       }
-
-                       nr_taken += nr_pages;
-                       nr_zone_taken[page_zonenum(page)] += nr_pages;
-                       list_move(&page->lru, dst);
-                       break;
+               if (!__isolate_lru_page_prepare(page, mode)) {
+                       /* It is being freed elsewhere */
+                       list_move(&page->lru, src);
+                       continue;
+               }
+               /*
+                * Be careful not to clear PageLRU until after we're
+                * sure the page is not being freed elsewhere -- the
+                * page release code relies on it.
+                */
+               if (unlikely(!get_page_unless_zero(page))) {
+                       list_move(&page->lru, src);
+                       continue;
+               }
 
-               default:
-busy:
-                       /* else it is being freed elsewhere */
+               if (!TestClearPageLRU(page)) {
+                       /* Another thread is already isolating this page */
+                       put_page(page);
                        list_move(&page->lru, src);
+                       continue;
                }
+
+               nr_taken += nr_pages;
+               nr_zone_taken[page_zonenum(page)] += nr_pages;
+               list_move(&page->lru, dst);
        }
 
        /*