mm/vmscan: update stale comments
author Andrey Ryabinin <aryabinin@virtuozzo.com>
Tue, 10 Apr 2018 23:27:51 +0000 (16:27 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 11 Apr 2018 17:28:29 +0000 (10:28 -0700)
Update some comments that became stale since the transition from per-zone
to per-node reclaim.
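
For context: the reclaim state these comments describe moved from struct
zone to the per-node struct pglist_data, and the flags were renamed to
match. A rough sketch of the per-node flags involved, paraphrasing
enum pgdat_flags from include/linux/mmzone.h of this era (not a verbatim
copy of the kernel source):

	/* Reclaim state is tracked per NUMA node (pgdat), not per zone. */
	enum pgdat_flags {
		PGDAT_CONGESTED,	/* node has many dirty pages backed by
					 * a congested BDI */
		PGDAT_DIRTY,		/* reclaim has recently found many dirty
					 * file pages at the tail of the LRU */
		PGDAT_WRITEBACK,	/* reclaim has recently found many pages
					 * under writeback */
	};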

Link: http://lkml.kernel.org/r/20180315164553.17856-2-aryabinin@virtuozzo.com
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Tejun Heo <tj@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/vmscan.c

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4390a8d5be41ee497569622e3b0381a851870114..6d74b12099bdee287edb85640ad013a886402f1f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -926,7 +926,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                        (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 
                /*
-                * The number of dirty pages determines if a zone is marked
+                * The number of dirty pages determines if a node is marked
                 * reclaim_congested which affects wait_iff_congested. kswapd
                 * will stall and start writing pages if the tail of the LRU
                 * is all dirty unqueued pages.
@@ -1764,7 +1764,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
         * as there is no guarantee the dirtying process is throttled in the
         * same way balance_dirty_pages() manages.
         *
-        * Once a zone is flagged ZONE_WRITEBACK, kswapd will count the number
+        * Once a node is flagged PGDAT_WRITEBACK, kswapd will count the number
         * of pages under pages flagged for immediate reclaim and stall if any
         * are encountered in the nr_immediate check below.
         */
@@ -1791,7 +1791,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
         */
        if (sane_reclaim(sc)) {
                /*
-                * Tag a zone as congested if all the dirty pages scanned were
+                * Tag a node as congested if all the dirty pages scanned were
                 * backed by a congested BDI and wait_iff_congested will stall.
                 */
                if (stat.nr_dirty && stat.nr_dirty == stat.nr_congested)
@@ -1812,7 +1812,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
        }
 
        /*
-        * Stall direct reclaim for IO completions if underlying BDIs or zone
+        * Stall direct reclaim for IO completions if underlying BDIs and node
         * is congested. Allow kswapd to continue until it starts encountering
         * unqueued dirty pages or cycling through the LRU too quickly.
         */
@@ -3808,7 +3808,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
 
        if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages) {
                /*
-                * Free memory by calling shrink zone with increasing
+                * Free memory by calling shrink node with increasing
                 * priorities until we have enough memory freed.
                 */
                do {
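
The do-while the last hunk truncates retries shrink_node() at increasing
reclaim priority until enough memory is freed; note that sc.priority counts
down, and a numerically lower value scans a larger share of the LRU each
pass. In this version of mm/vmscan.c the completed loop reads roughly:

	do {
		shrink_node(pgdat, &sc);
	} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);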