mm/hugetlb: filter out hugetlb pages if HUGEPAGE migration is not supported.
author    Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
          Tue, 4 Sep 2018 22:45:59 +0000 (15:45 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 4 Sep 2018 23:45:02 +0000 (16:45 -0700)
When scanning for movable pages, filter out hugetlb pages if hugepage
migration is not supported.  Without this we hit an infinite loop in
__offline_pages() where we do

        pfn = scan_movable_pages(start_pfn, end_pfn);
        if (pfn) { /* We have movable pages */
                ret = do_migrate_range(pfn, end_pfn);
                goto repeat;
        }

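For illustration, the surrounding retry loop can be paraphrased as follows
(a sketch, not the literal __offline_pages() body): when scan_movable_pages()
reports the pfn of a hugetlb page whose hstate cannot be migrated,
do_migrate_range() can never move it, the page stays in the range, and the
loop never exits.

/*
 * Illustrative sketch only, paraphrased from the snippet above rather
 * than copied from mm/memory_hotplug.c.
 */
static int offline_pages_sketch(unsigned long start_pfn,
                                unsigned long end_pfn)
{
        unsigned long pfn;
        int ret = 0;

repeat:
        pfn = scan_movable_pages(start_pfn, end_pfn);
        if (pfn) {              /* We have movable pages */
                ret = do_migrate_range(pfn, end_pfn);
                goto repeat;    /* spins forever on an unmovable huge page */
        }
        /* ... isolation checks and the actual offlining follow here ... */
        return ret;
}
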
Fix this by checking hugepage_migration_supported() both in
has_unmovable_pages(), which is the primary backoff mechanism for page
offlining, and, for consistency, also in scan_movable_pages(), because it
makes no sense to return the pfn of a non-migratable huge page.

This issue was revealed by, but not caused by, c5b52c6e41f2 ("mm,
memory_hotplug: do not fail offlining too early").

Link: http://lkml.kernel.org/r/20180824063314.21981-1-aneesh.kumar@linux.ibm.com
Fixes: c5b52c6e41f2 ("mm, memory_hotplug: do not fail offlining too early")
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Reported-by: Haren Myneni <haren@linux.vnet.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/memory_hotplug.c
mm/page_alloc.c

diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9eea6e809a4e99fcf11125d47f28d7493af8b0ab..38d94b703e9d4932279b5657a2c889dfaaf1b749 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1333,7 +1333,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
                        if (__PageMovable(page))
                                return pfn;
                        if (PageHuge(page)) {
-                               if (page_huge_active(page))
+                               if (hugepage_migration_supported(page_hstate(page)) &&
+                                   page_huge_active(page))
                                        return pfn;
                                else
                                        pfn = round_up(pfn + 1,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 05e983f42316a1c5446536fb16cdf3f0ceb3cdd0..89d2a2ab3fe68c3ae46104074c519c7500dd86cb 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7708,6 +7708,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                 * handle each tail page individually in migration.
                 */
                if (PageHuge(page)) {
+
+                       if (!hugepage_migration_supported(page_hstate(page)))
+                               goto unmovable;
+
                        iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
                        continue;
                }
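
With both checks in place, offlining a block that contains a non-migratable
hugetlb page now backs off cleanly instead of spinning. A minimal user-space
way to exercise the path is the standard memory-hotplug sysfs interface; the
block number below is only a placeholder:

/*
 * Minimal user-space trigger for the offlining path, for illustration.
 * /sys/devices/system/memory/memoryN/state is the standard memory
 * hot-remove interface; "memory42" is a placeholder block number.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/devices/system/memory/memory42/state";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /*
         * With the fix, this either succeeds or returns an error (e.g.
         * EBUSY) when the block holds a non-migratable hugetlb page,
         * rather than leaving the kernel looping in __offline_pages().
         */
        if (write(fd, "offline", strlen("offline")) < 0)
                perror("write");
        close(fd);
        return 0;
}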