mm/page_alloc.c:__alloc_pages_nodemask(): don't alter arg gfp_mask
author	Andrew Morton <akpm@linux-foundation.org>
	Wed, 11 Feb 2015 23:25:04 +0000 (15:25 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Thu, 12 Feb 2015 01:06:01 +0000 (17:06 -0800)
__alloc_pages_nodemask() strips __GFP_IO when retrying the page
allocation.  But it does this by altering the function-wide variable
gfp_mask.  This will cause subsequent allocation attempts to inadvertently
use the modified gfp_mask.

Also, pass the correct mask (the mask we actually used) into
trace_mm_page_alloc().
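
For illustration, a minimal user-space sketch of the pattern the patch
adopts: per-attempt flag changes happen on a local copy (alloc_mask),
so the caller-supplied mask stays intact across retries and the trace
reports the flags actually used.  The names GFP_IO, GFP_HARD, noio(),
try_alloc() and alloc() below are stand-ins invented for the example,
modelling __GFP_IO, __GFP_HARDWALL, memalloc_noio_flags() and the
allocator paths; this is not kernel code.

#include <stdio.h>

#define GFP_IO   0x1	/* stand-in for __GFP_IO */
#define GFP_HARD 0x2	/* stand-in for __GFP_HARDWALL */

typedef unsigned int gfp_t;

/* Models memalloc_noio_flags(): strip the IO bit for this attempt only. */
static gfp_t noio(gfp_t mask)
{
	return mask & ~GFP_IO;
}

static int try_alloc(gfp_t mask)
{
	(void)mask;
	return 0;	/* pretend the attempt fails, to force the retry path */
}

static int alloc(gfp_t gfp_mask)
{
	gfp_t alloc_mask;	/* the mask actually used, as in the patch */
	int page;

	/* First attempt: OR in a per-attempt bit on the copy, not the arg. */
	alloc_mask = gfp_mask | GFP_HARD;
	page = try_alloc(alloc_mask);
	if (!page) {
		/* Slow path: strip IO on the copy; gfp_mask stays intact. */
		alloc_mask = noio(gfp_mask);
		page = try_alloc(alloc_mask);
	}

	/* Trace with the mask that was actually used, per the patch. */
	printf("traced mask: %#x (caller's mask still %#x)\n",
	       alloc_mask, gfp_mask);
	return page;
}

int main(void)
{
	alloc(GFP_IO);
	return 0;
}

With a local copy, a repeat pass (e.g. after a cpuset retry) starts
again from the caller's gfp_mask, and the tracepoint reports the flags
the allocator really used rather than a mask mutated by an earlier
attempt.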

Cc: Ming Lei <ming.lei@canonical.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/page_alloc.c

index f121050e8530b8e2357153de31298a26207f1096..1c7d90f7a84ac6013dddf45b1efe2afd1b7deccc 100644
@@ -2865,6 +2865,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        unsigned int cpuset_mems_cookie;
        int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
        int classzone_idx;
+       gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
 
        gfp_mask &= gfp_allowed_mask;
 
@@ -2898,22 +2899,24 @@ retry_cpuset:
        classzone_idx = zonelist_zone_idx(preferred_zoneref);
 
        /* First allocation attempt */
-       page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
-                       zonelist, high_zoneidx, alloc_flags,
-                       preferred_zone, classzone_idx, migratetype);
+       alloc_mask = gfp_mask|__GFP_HARDWALL;
+       page = get_page_from_freelist(alloc_mask, nodemask, order, zonelist,
+                       high_zoneidx, alloc_flags, preferred_zone,
+                       classzone_idx, migratetype);
        if (unlikely(!page)) {
                /*
                 * Runtime PM, block IO and its error handling path
                 * can deadlock because I/O on the device might not
                 * complete.
                 */
-               gfp_mask = memalloc_noio_flags(gfp_mask);
-               page = __alloc_pages_slowpath(gfp_mask, order,
+               alloc_mask = memalloc_noio_flags(gfp_mask);
+
+               page = __alloc_pages_slowpath(alloc_mask, order,
                                zonelist, high_zoneidx, nodemask,
                                preferred_zone, classzone_idx, migratetype);
        }
 
-       trace_mm_page_alloc(page, order, gfp_mask, migratetype);
+       trace_mm_page_alloc(page, order, alloc_mask, migratetype);
 
 out:
        /*