memory tiering: skip to scan fast memory
author    Huang Ying <ying.huang@intel.com>
          Tue, 22 Mar 2022 21:46:27 +0000 (14:46 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 22 Mar 2022 22:57:09 +0000 (15:57 -0700)
If NUMA balancing is used to optimize page placement only among memory
types rather than among sockets, hot pages in the fast memory node
cannot be migrated (promoted) anywhere.  So there is no need to scan
pages in the fast memory node by changing their PTE/PMD mappings to
PROT_NONE, and skipping the scan avoids the resulting page faults too.

In testing, with only the memory tiering NUMA balancing mode enabled,
the patch reduces the number of NUMA balancing hint faults on the DRAM
node to almost 0, while the benchmark score does not change visibly.
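For reference, and assuming the sysctl semantics introduced earlier in
this series: the balancing mode is selected through
/proc/sys/kernel/numa_balancing, which the series turns into a bitmask
(NUMA_BALANCING_NORMAL = 1, NUMA_BALANCING_MEMORY_TIERING = 2), so
writing 2 enables the tiering-only mode measured here.  Global
hint-fault totals appear as numa_hint_faults and numa_hint_faults_local
in /proc/vmstat; per-node attribution as quoted above needs additional
instrumentation.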

Link: https://lkml.kernel.org/r/20220221084529.1052339-4-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Suggested-by: Dave Hansen <dave.hansen@linux.intel.com>
Tested-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Wei Xu <weixugc@google.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: zhongjiang-ali <zhongjiang-ali@linux.alibaba.com>
Cc: Feng Tang <feng.tang@intel.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/huge_memory.c
mm/mprotect.c

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9c52b3661f7188edc9456c1f1d1727d76535ae3d..88c83c84325c05dbd1af48fdc517c6c495ac82e2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -34,6 +34,7 @@
 #include <linux/oom.h>
 #include <linux/numa.h>
 #include <linux/page_owner.h>
+#include <linux/sched/sysctl.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -1766,17 +1767,28 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        }
 #endif
 
-       /*
-        * Avoid trapping faults against the zero page. The read-only
-        * data is likely to be read-cached on the local CPU and
-        * local/remote hits to the zero page are not interesting.
-        */
-       if (prot_numa && is_huge_zero_pmd(*pmd))
-               goto unlock;
+       if (prot_numa) {
+               struct page *page;
+               /*
+                * Avoid trapping faults against the zero page. The read-only
+                * data is likely to be read-cached on the local CPU and
+                * local/remote hits to the zero page are not interesting.
+                */
+               if (is_huge_zero_pmd(*pmd))
+                       goto unlock;
 
-       if (prot_numa && pmd_protnone(*pmd))
-               goto unlock;
+               if (pmd_protnone(*pmd))
+                       goto unlock;
 
+               page = pmd_page(*pmd);
+               /*
+                * Skip scanning top tier node if normal numa
+                * balancing is disabled
+                */
+               if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
+                   node_is_toptier(page_to_nid(page)))
+                       goto unlock;
+       }
        /*
         * In case prot_numa, we are under mmap_read_lock(mm). It's critical
         * to not clear pmd intermittently to avoid race with MADV_DONTNEED
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 2887644fd15057db5669997f0ece69dc82427dd1..b69ce7a7b2b79135d15a074869a4c87faa2269b4 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -29,6 +29,7 @@
 #include <linux/uaccess.h>
 #include <linux/mm_inline.h>
 #include <linux/pgtable.h>
+#include <linux/sched/sysctl.h>
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
@@ -83,6 +84,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                         */
                        if (prot_numa) {
                                struct page *page;
+                               int nid;
 
                                /* Avoid TLB flush if possible */
                                if (pte_protnone(oldpte))
@@ -109,7 +111,16 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                 * Don't mess with PTEs if page is already on the node
                                 * a single-threaded process is running on.
                                 */
-                               if (target_node == page_to_nid(page))
+                               nid = page_to_nid(page);
+                               if (target_node == nid)
+                                       continue;
+
+                               /*
+                                * Skip scanning top tier node if normal numa
+                                * balancing is disabled
+                                */
+                               if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
+                                   node_is_toptier(nid))
                                        continue;
                        }