mm/page_isolation: fix potential warning from user
author Qian Cai <cai@lca.pw>
Fri, 31 Jan 2020 06:15:01 +0000 (22:15 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 31 Jan 2020 18:30:39 +0000 (10:30 -0800)
It makes sense to call WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE) from
start_isolate_page_range(), but we should avoid triggering it from userspace,
i.e. from is_mem_section_removable(), because a non-root user could crash the
system if panic_on_warn is set.

While at it, simplify the code a bit by removing an unnecessary jump
label.

Link: http://lkml.kernel.org/r/20200120163915.1469-1-cai@lca.pw
Signed-off-by: Qian Cai <cai@lca.pw>
Suggested-by: Michal Hocko <mhocko@kernel.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
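
For orientation, a minimal sketch of the two paths that reach has_unmovable_pages(); this is not part of the patch, and the intermediate helpers are paraphrased from the mm/ code of this era:

/*
 * Illustrative call graph only -- not part of this commit.
 *
 * Userspace-reachable path (reading the sysfs "removable" attribute):
 *   is_mem_section_removable()
 *     -> ... -> has_unmovable_pages()          must stay silent
 *
 * Kernel isolation path (memory offlining, alloc_contig_range()):
 *   start_isolate_page_range()
 *     -> set_migratetype_isolate()
 *          -> has_unmovable_pages()
 *   WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE) now fires in
 *   set_migratetype_isolate(), and only when isolation actually fails.
 */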
mm/page_alloc.c
mm/page_isolation.c

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2a1a816c79921927825506acaee3254b8b006c0a..15e908ad933bc281bf0753158459387df1867fd3 100644
@@ -8214,7 +8214,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
                if (is_migrate_cma(migratetype))
                        return NULL;
 
-               goto unmovable;
+               return page;
        }
 
        for (; iter < pageblock_nr_pages; iter++) {
@@ -8224,7 +8224,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
                page = pfn_to_page(pfn + iter);
 
                if (PageReserved(page))
-                       goto unmovable;
+                       return page;
 
                /*
                 * If the zone is movable and we have ruled out all reserved
@@ -8244,7 +8244,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
                        unsigned int skip_pages;
 
                        if (!hugepage_migration_supported(page_hstate(head)))
-                               goto unmovable;
+                               return page;
 
                        skip_pages = compound_nr(head) - (page - head);
                        iter += skip_pages - 1;
@@ -8286,12 +8286,9 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
                 * is set to both of a memory hole page and a _used_ kernel
                 * page at boot.
                 */
-               goto unmovable;
+               return page;
        }
        return NULL;
-unmovable:
-       WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
-       return pfn_to_page(pfn + iter);
 }
 
 #ifdef CONFIG_CONTIG_ALLOC
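
With the jump label gone, has_unmovable_pages() has one job: report the first page that prevents isolation, or NULL when the whole pageblock is movable. Warning and reporting are left to the caller. A sketch of the resulting contract; the parameter list beyond what the hunk headers above show is paraphrased:

/*
 * Sketch, not part of the diff:
 *
 *   struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 *                                    int migratetype, int flags);
 *
 * Returns the first unmovable page found in the pageblock, or NULL if
 * every page can be migrated or is free.  The caller decides whether
 * that result warrants a WARN_ON_ONCE() or a dump_page().
 */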
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index e70586523ca3c7fe932b1d3f671810c9a060f9be..a9fd7c740c23894bc94e57fc380778880f923722 100644
@@ -54,14 +54,18 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
 
 out:
        spin_unlock_irqrestore(&zone->lock, flags);
-       if (!ret)
+       if (!ret) {
                drain_all_pages(zone);
-       else if ((isol_flags & REPORT_FAILURE) && unmovable)
-               /*
-                * printk() with zone->lock held will guarantee to trigger a
-                * lockdep splat, so defer it here.
-                */
-               dump_page(unmovable, "unmovable page");
+       } else {
+               WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
+
+               if ((isol_flags & REPORT_FAILURE) && unmovable)
+                       /*
+                        * printk() with zone->lock held will likely trigger a
+                        * lockdep splat, so defer it here.
+                        */
+                       dump_page(unmovable, "unmovable page");
+       }
 
        return ret;
 }
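
For reference, the kernel-side isolation that can still hit the relocated WARN_ON_ONCE() is started roughly like this from the memory-offline path; this is a paraphrase, not part of the diff, and flag names have changed across kernel versions (MEMORY_OFFLINE was formerly SKIP_HWPOISON):

	/* Paraphrased from __offline_pages(); not part of this commit. */
	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				       MEMORY_OFFLINE | REPORT_FAILURE);
	if (ret < 0) {
		/*
		 * Isolation failed: with REPORT_FAILURE set,
		 * set_migratetype_isolate() has already dumped the
		 * offending page, and warned if the pageblock sits in
		 * ZONE_MOVABLE.
		 */
		goto failed_removal;
	}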