]> git.baikalelectronics.ru Git - kernel.git/commitdiff
mm: hugetlb: soft-offline: dissolve source hugepage after successful migration
authorAnshuman Khandual <khandual@linux.vnet.ibm.com>
Mon, 10 Jul 2017 22:47:41 +0000 (15:47 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Mon, 10 Jul 2017 23:32:30 +0000 (16:32 -0700)
Currently a hugepage migrated by soft-offline (i.e. due to correctable
memory errors) is kept intact as a hugepage, which means the many non-error
pages within it remain unreusable, i.e. wasted.

This patch solves this issue by dissolving source hugepages into buddy.
As done in the previous patch, PageHWPoison is set only on the head page of
the error hugepage.  Then, when dissolving, we move the PageHWPoison flag
to the raw error page so that all healthy subpages return back to buddy.

[arnd@arndb.de: fix warnings: replace some macros with inline functions]
Link: http://lkml.kernel.org/r/20170609102544.2947326-1-arnd@arndb.de
Link: http://lkml.kernel.org/r/1496305019-5493-5-git-send-email-n-horiguchi@ah.jp.nec.com
Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/hugetlb.h
mm/hugetlb.c
mm/memory-failure.c
mm/migrate.c

index 46bfb702e7d62ac021505d848b94fcdba638838e..668ab1742ef6e6287fddca73178f79f570766f25 100644 (file)
@@ -472,6 +472,7 @@ static inline pgoff_t basepage_index(struct page *page)
        return __basepage_index(page);
 }
 
+extern int dissolve_free_huge_page(struct page *page);
 extern int dissolve_free_huge_pages(unsigned long start_pfn,
                                    unsigned long end_pfn);
 static inline bool hugepage_migration_supported(struct hstate *h)
@@ -550,15 +551,37 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
 {
        return 1;
 }
-#define hstate_index_to_shift(index) 0
-#define hstate_index(h) 0
+
+static inline unsigned hstate_index_to_shift(unsigned index)
+{
+       return 0;
+}
+
+static inline int hstate_index(struct hstate *h)
+{
+       return 0;
+}
 
 static inline pgoff_t basepage_index(struct page *page)
 {
        return page->index;
 }
-#define dissolve_free_huge_pages(s, e) 0
-#define hugepage_migration_supported(h)        false
+
+static inline int dissolve_free_huge_page(struct page *page)
+{
+       return 0;
+}
+
+static inline int dissolve_free_huge_pages(unsigned long start_pfn,
+                                          unsigned long end_pfn)
+{
+       return 0;
+}
+
+static inline bool hugepage_migration_supported(struct hstate *h)
+{
+       return false;
+}
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
index 41a1b48cefbfc17de0fa50e13e7e28bb0b4fc831..b2d44363837a64fd60984a9f1f1e5e0927d0ff9f 100644 (file)
@@ -1459,7 +1459,7 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
  * number of free hugepages would be reduced below the number of reserved
  * hugepages.
  */
-static int dissolve_free_huge_page(struct page *page)
+int dissolve_free_huge_page(struct page *page)
 {
        int rc = 0;
 
@@ -1472,6 +1472,14 @@ static int dissolve_free_huge_page(struct page *page)
                        rc = -EBUSY;
                        goto out;
                }
+               /*
+                * Move PageHWPoison flag from head page to the raw error page,
+                * which makes any subpages rather than the error page reusable.
+                */
+               if (PageHWPoison(head) && page != head) {
+                       SetPageHWPoison(page);
+                       ClearPageHWPoison(head);
+               }
                list_del(&head->lru);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
index a9ddb0e72f5bb0151084fff51e3721b03fdab71b..42c5803e62758a3b3032866b5018f21fedc52a00 100644 (file)
@@ -1575,11 +1575,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
                if (ret > 0)
                        ret = -EIO;
        } else {
-               /* overcommit hugetlb page will be freed to buddy */
-               SetPageHWPoison(page);
                if (PageHuge(page))
-                       dequeue_hwpoisoned_huge_page(hpage);
-               num_poisoned_pages_inc();
+                       dissolve_free_huge_page(page);
        }
        return ret;
 }
index 051cc1555d36e34f4734a7e54faac1a6c2fe144c..8935cbe362ce77e32b7a3449d6fdae0990dd3561 100644 (file)
@@ -1252,6 +1252,8 @@ put_anon:
 out:
        if (rc != -EAGAIN)
                putback_active_hugepage(hpage);
+       if (reason == MR_MEMORY_FAILURE && !test_set_page_hwpoison(hpage))
+               num_poisoned_pages_inc();
 
        /*
         * If migration was not successful and there's a freeing callback, use