mm, compaction: distinguish between full and partial COMPACT_COMPLETE
author     Michal Hocko <mhocko@suse.com>
           Fri, 20 May 2016 23:56:47 +0000 (16:56 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 21 May 2016 00:58:30 +0000 (17:58 -0700)
COMPACT_COMPLETE now means that the migration and free scanners have met.
On its own this is not very useful feedback for somebody who wants to act
on it.  The current caller might be a poor guy who just happened to scan
a tiny portion of the zone, and that alone could be the reason no suitable
pages were compacted.  Make sure we distinguish between full and partial
zone walks.
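
The distinction boils down to a new whole_zone flag: only a walk that
started at the zone's first pfn may report COMPACT_COMPLETE once the
scanners meet; everything else reports the new COMPACT_PARTIAL_SKIPPED.
The fragment below is a minimal stand-alone model of that rule (plain C,
not the kernel code itself); the names mirror the hunks further down.

#include <stdbool.h>

/* Subset of enum compact_result relevant to this patch. */
enum compact_result {
        COMPACT_PARTIAL_SKIPPED,        /* only part of the zone was walked */
        COMPACT_COMPLETE,               /* the whole zone was walked */
};

/*
 * Model of the __compact_finished() change: once the migration and free
 * scanners have met, the reported result depends on whether the walk
 * covered the whole zone.
 */
enum compact_result scanners_met_result(bool whole_zone)
{
        return whole_zone ? COMPACT_COMPLETE : COMPACT_PARTIAL_SKIPPED;
}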

Consumers should treat COMPACT_PARTIAL_SKIPPED as a potential success and
retry optimistically.
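
Purely as an illustration (this helper is hypothetical and not part of the
patch), such a consumer could, reusing the enum from the sketch above,
decide whether another attempt is worthwhile like this:

#include <stdbool.h>

/*
 * Hypothetical consumer-side check: COMPACT_PARTIAL_SKIPPED means only a
 * part of the zone was looked at, so another attempt may still succeed;
 * COMPACT_COMPLETE means the whole zone was already covered.
 */
bool compaction_worth_retrying(enum compact_result result)
{
        return result == COMPACT_PARTIAL_SKIPPED;
}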

The existing users of COMPACT_COMPLETE are conservatively changed to treat
COMPACT_PARTIAL_SKIPPED the same way, but some of them should probably be
reconsidered so that they defer compaction only for COMPACT_COMPLETE under
the new semantics.
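
A reconsidered caller along those lines might, for example, key the
deferral decision on the full walk only (hypothetical sketch, again
reusing the enum from the first example; the patch itself keeps the
conservative behaviour):

/*
 * Hypothetical reconsidered policy, not what this patch does: treat only
 * a full, unsuccessful zone walk as evidence strong enough to defer
 * further compaction; a partial, skipped walk is not.
 */
bool should_defer_compaction(enum compact_result result)
{
        return result == COMPACT_COMPLETE;
}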

This patch shouldn't introduce any functional changes.

Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/compaction.h
include/trace/events/compaction.h
mm/compaction.c
mm/internal.h

index 11f228712ed5edf158935803aa2f9e304b1c6662..9b37f9d3f7a8b0b2d62a437a20c0653516a9fc5d 100644 (file)
@@ -21,7 +21,15 @@ enum compact_result {
         * pages
         */
        COMPACT_PARTIAL,
-       /* The full zone was compacted */
+       /*
+        * direct compaction has scanned part of the zone but wasn't successful
+        * in compacting suitable pages.
+        */
+       COMPACT_PARTIAL_SKIPPED,
+       /*
+        * The full zone was scanned but wasn't successful in compacting
+        * suitable pages.
+        */
        COMPACT_COMPLETE,
        /* For more detailed tracepoint output */
        COMPACT_NO_SUITABLE_PAGE,
index 6ba16c86d7dbd12ac6ca00a28f287d9819f32d85..36e2d6fb1360a722183e9cabd47b8583b557b13f 100644 (file)
@@ -14,6 +14,7 @@
        EM( COMPACT_DEFERRED,           "deferred")             \
        EM( COMPACT_CONTINUE,           "continue")             \
        EM( COMPACT_PARTIAL,            "partial")              \
+       EM( COMPACT_PARTIAL_SKIPPED,    "partial_skipped")      \
        EM( COMPACT_COMPLETE,           "complete")             \
        EM( COMPACT_NO_SUITABLE_PAGE,   "no_suitable_page")     \
        EM( COMPACT_NOT_SUITABLE_ZONE,  "not_suitable_zone")    \
index b2b94474dd285b934851fa50aa4148d41418d63a..4af1577adb5c3b33d720403b83d18d7037d45045 100644 (file)
@@ -1252,7 +1252,10 @@ static enum compact_result __compact_finished(struct zone *zone, struct compact_
                if (cc->direct_compaction)
                        zone->compact_blockskip_flush = true;
 
-               return COMPACT_COMPLETE;
+               if (cc->whole_zone)
+                       return COMPACT_COMPLETE;
+               else
+                       return COMPACT_PARTIAL_SKIPPED;
        }
 
        if (is_via_compact_memory(cc->order))
@@ -1413,6 +1416,10 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
                zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
                zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
        }
+
+       if (cc->migrate_pfn == start_pfn)
+               cc->whole_zone = true;
+
        cc->last_migrated_pfn = 0;
 
        trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
@@ -1634,7 +1641,8 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
                        goto break_loop;
                }
 
-               if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) {
+               if (mode != MIGRATE_ASYNC && (status == COMPACT_COMPLETE ||
+                                       status == COMPACT_PARTIAL_SKIPPED)) {
                        /*
                         * We think that allocation won't succeed in this zone
                         * so we defer compaction there. If it ends up
@@ -1881,7 +1889,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
                                                cc.classzone_idx, 0)) {
                        success = true;
                        compaction_defer_reset(zone, cc.order, false);
-               } else if (status == COMPACT_COMPLETE) {
+               } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
                        /*
                         * We use sync migration mode here, so we defer like
                         * sync direct compaction does.
index 3ac544f1963fd8a5c5886ab02900c5a012c25f19..f6f3353b0868969f657bc739523ce59e9910aa8b 100644 (file)
@@ -174,6 +174,7 @@ struct compact_control {
        enum migrate_mode mode;         /* Async or sync migration mode */
        bool ignore_skip_hint;          /* Scan blocks even if marked skip */
        bool direct_compaction;         /* False from kcompactd or /proc/... */
+       bool whole_zone;                /* Whole zone has been scanned */
        int order;                      /* order a direct compactor needs */
        const gfp_t gfp_mask;           /* gfp mask of a direct compactor */
        const unsigned int alloc_flags; /* alloc flags of a direct compactor */