x86/vmemmap: drop handling of 1GB vmemmap ranges
author    Oscar Salvador <osalvador@suse.de>        Fri, 30 Apr 2021 05:57:16 +0000 (22:57 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>    Fri, 30 Apr 2021 18:20:38 +0000 (11:20 -0700)
There is no code that allocates 1GB pages when mapping the vmemmap range,
as that might waste some memory and would require extra complexity which
is not really worth it.

Drop the dead code both for the aligned and unaligned cases and leave only
the direct map handling.

Link: https://lkml.kernel.org/r/20210309214050.4674-3-osalvador@suse.de
Signed-off-by: Oscar Salvador <osalvador@suse.de>
Suggested-by: David Hildenbrand <david@redhat.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
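
For context, this is how the PUD-level branch of remove_pud_table() reads
after the patch (condensed from the diff below; the surrounding loop and
the descent into remove_pmd_table() are elided):

	/*
	 * Only a fully covered 1GB entry, which per the commit message
	 * can only belong to the direct map, is cleared at this level.
	 */
	if (pud_large(*pud) &&
	    IS_ALIGNED(addr, PUD_SIZE) &&
	    IS_ALIGNED(next, PUD_SIZE)) {
		spin_lock(&init_mm.page_table_lock);
		pud_clear(pud);
		spin_unlock(&init_mm.page_table_lock);
		pages++;
		continue;
	}
	/* Anything else falls through to PMD-level teardown. */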
arch/x86/mm/init_64.c

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index ff312a87e58d..af217ff6da57 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1062,7 +1062,6 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
        unsigned long next, pages = 0;
        pmd_t *pmd_base;
        pud_t *pud;
-       void *page_addr;
 
        pud = pud_start + pud_index(addr);
        for (; addr < end; addr = next, pud++) {
@@ -1071,33 +1070,13 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
                if (!pud_present(*pud))
                        continue;
 
-               if (pud_large(*pud)) {
-                       if (IS_ALIGNED(addr, PUD_SIZE) &&
-                           IS_ALIGNED(next, PUD_SIZE)) {
-                               if (!direct)
-                                       free_pagetable(pud_page(*pud),
-                                                      get_order(PUD_SIZE));
-
-                               spin_lock(&init_mm.page_table_lock);
-                               pud_clear(pud);
-                               spin_unlock(&init_mm.page_table_lock);
-                               pages++;
-                       } else {
-                               /* If here, we are freeing vmemmap pages. */
-                               memset((void *)addr, PAGE_INUSE, next - addr);
-
-                               page_addr = page_address(pud_page(*pud));
-                               if (!memchr_inv(page_addr, PAGE_INUSE,
-                                               PUD_SIZE)) {
-                                       free_pagetable(pud_page(*pud),
-                                                      get_order(PUD_SIZE));
-
-                                       spin_lock(&init_mm.page_table_lock);
-                                       pud_clear(pud);
-                                       spin_unlock(&init_mm.page_table_lock);
-                               }
-                       }
-
+               if (pud_large(*pud) &&
+                   IS_ALIGNED(addr, PUD_SIZE) &&
+                   IS_ALIGNED(next, PUD_SIZE)) {
+                       spin_lock(&init_mm.page_table_lock);
+                       pud_clear(pud);
+                       spin_unlock(&init_mm.page_table_lock);
+                       pages++;
                        continue;
                }
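
As a side note, the alignment test that gates the remaining branch can be
illustrated with a minimal user-space sketch (hypothetical values; PUD_SIZE
and IS_ALIGNED are re-defined here to match their x86-64, 4-level-paging
kernel semantics):

	#include <stdio.h>
	#include <stdint.h>

	#define PUD_SIZE	(1UL << 30)	/* 1GB with 4-level paging */
	#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

	int main(void)
	{
		uint64_t addr = 0x40000000UL;	/* 1GB-aligned start */
		uint64_t next = 0x80000000UL;	/* 1GB-aligned end */

		/*
		 * Both ends must sit on a 1GB boundary before the whole
		 * PUD entry may be cleared; partial coverage is handled
		 * at the PMD level instead.
		 */
		if (IS_ALIGNED(addr, PUD_SIZE) && IS_ALIGNED(next, PUD_SIZE))
			printf("whole 1GB entry covered: pud_clear()\n");
		else
			printf("partial coverage: descend to PMDs\n");
		return 0;
	}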