powerpc/64s/radix: Fix 128TB-512TB virtual address boundary case allocation
author	Nicholas Piggin <npiggin@gmail.com>
	Thu, 9 Nov 2017 17:27:39 +0000 (04:27 +1100)
committer	Michael Ellerman <mpe@ellerman.id.au>
	Mon, 13 Nov 2017 12:35:29 +0000 (23:35 +1100)
Radix VA space allocations test addresses against mm->task_size, which
is 512TB, even in cases where the intention is to limit allocation to
below 128TB.

This results in an mmap with a hint address below 128TB, but with
address + length above 128TB, succeeding when it should fail (as the
hash MMU does after the previous patch).
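
A minimal userspace sketch of the failing case (not part of the patch),
assuming the 128TB boundary is 2^47 (DEFAULT_MAP_WINDOW) and using
MAP_FIXED as the example: the hint is below 128TB but the range crosses
the boundary, so the request is expected to fail with ENOMEM; before
this fix the radix path checked it against the full 512TB mm->task_size
and let it succeed.

/*
 * Hypothetical reproducer: map 2MB with MAP_FIXED so the range starts
 * just below the 128TB boundary and ends above it.  The process never
 * passed a hint >= 128TB, so it has not opted in to the larger address
 * space and the mapping should be refused.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>

int main(void)
{
	unsigned long boundary = 1UL << 47;		/* 128TB */
	unsigned long len = 2UL << 20;			/* 2MB, crosses the boundary */
	void *hint = (void *)(boundary - (1UL << 20));	/* 1MB below 128TB */
	void *p;

	p = mmap(hint, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (p == MAP_FAILED)
		printf("mmap failed: %s (expected after the fix)\n",
		       strerror(errno));
	else
		printf("mmap succeeded at %p (the pre-fix behaviour)\n", p);
	return 0;
}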

Fix this by setting the high address limit to be considered up front,
and basing all subsequent allocation checks on it consistently.
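
In isolation, the check each of the three allocators gains in the patch
below looks roughly like this (a sketch lifted from the diff, with
comments added here; DEFAULT_MAP_WINDOW is the 128TB boundary and
TASK_SIZE the full 512TB range):

	/*
	 * Choose the address-space ceiling once, up front.  Stay within
	 * the 128TB window unless the caller explicitly asked for an
	 * address at or above it, or for a fixed mapping crossing it.
	 */
	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	/* Every later size and placement check uses high_limit. */
	if (len > high_limit)
		return -ENOMEM;
	if (fixed && addr > high_limit - len)
		return -ENOMEM;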

Fixes: f4ea6dcb08ea ("powerpc/mm: Enable mappings above 128TB")
Cc: stable@vger.kernel.org # v4.12+
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/mm/hugetlbpage-radix.c
arch/powerpc/mm/mmap.c

index a12e86395025f9c7fcdb8a12c76ae642a1ef7cb8..0a3d71aae175a4a168b00ac2b845a5a11b9c6448 100644 (file)
@@ -48,17 +48,28 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct hstate *h = hstate_file(file);
+       int fixed = (flags & MAP_FIXED);
+       unsigned long high_limit;
        struct vm_unmapped_area_info info;
 
-       if (unlikely(addr > mm->context.addr_limit && addr < TASK_SIZE))
-               mm->context.addr_limit = TASK_SIZE;
+       high_limit = DEFAULT_MAP_WINDOW;
+       if (addr >= high_limit || (fixed && (addr + len > high_limit)))
+               high_limit = TASK_SIZE;
 
        if (len & ~huge_page_mask(h))
                return -EINVAL;
-       if (len > mm->task_size)
+       if (len > high_limit)
                return -ENOMEM;
+       if (fixed) {
+               if (addr > high_limit - len)
+                       return -ENOMEM;
+       }
 
-       if (flags & MAP_FIXED) {
+       if (unlikely(addr > mm->context.addr_limit &&
+                    mm->context.addr_limit != TASK_SIZE))
+               mm->context.addr_limit = TASK_SIZE;
+
+       if (fixed) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
@@ -67,7 +78,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
-               if (mm->task_size - len >= addr &&
+               if (high_limit - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
@@ -78,12 +89,9 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
-       info.high_limit = current->mm->mmap_base;
+       info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
 
-       if (addr > DEFAULT_MAP_WINDOW)
-               info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
-
        return vm_unmapped_area(&info);
 }
index 5d78b193fec4142ecd3d4d5788dfe686cf2e8e6c..6d476a7b56112bf0fe985a39bd0ea8585cbd60b6 100644 (file)
@@ -106,22 +106,32 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
+       int fixed = (flags & MAP_FIXED);
+       unsigned long high_limit;
        struct vm_unmapped_area_info info;
 
+       high_limit = DEFAULT_MAP_WINDOW;
+       if (addr >= high_limit || (fixed && (addr + len > high_limit)))
+               high_limit = TASK_SIZE;
+
+       if (len > high_limit)
+               return -ENOMEM;
+       if (fixed) {
+               if (addr > high_limit - len)
+                       return -ENOMEM;
+       }
+
        if (unlikely(addr > mm->context.addr_limit &&
                     mm->context.addr_limit != TASK_SIZE))
                mm->context.addr_limit = TASK_SIZE;
 
-       if (len > mm->task_size - mmap_min_addr)
-               return -ENOMEM;
-
-       if (flags & MAP_FIXED)
+       if (fixed)
                return addr;
 
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
-               if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
+               if (high_limit - len >= addr && addr >= mmap_min_addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
@@ -129,13 +139,9 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
        info.flags = 0;
        info.length = len;
        info.low_limit = mm->mmap_base;
+       info.high_limit = high_limit;
        info.align_mask = 0;
 
-       if (unlikely(addr > DEFAULT_MAP_WINDOW))
-               info.high_limit = mm->context.addr_limit;
-       else
-               info.high_limit = DEFAULT_MAP_WINDOW;
-
        return vm_unmapped_area(&info);
 }
 
@@ -149,37 +155,42 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
+       int fixed = (flags & MAP_FIXED);
+       unsigned long high_limit;
        struct vm_unmapped_area_info info;
 
+       high_limit = DEFAULT_MAP_WINDOW;
+       if (addr >= high_limit || (fixed && (addr + len > high_limit)))
+               high_limit = TASK_SIZE;
+
+       if (len > high_limit)
+               return -ENOMEM;
+       if (fixed) {
+               if (addr > high_limit - len)
+                       return -ENOMEM;
+       }
+
        if (unlikely(addr > mm->context.addr_limit &&
                     mm->context.addr_limit != TASK_SIZE))
                mm->context.addr_limit = TASK_SIZE;
 
-       /* requested length too big for entire address space */
-       if (len > mm->task_size - mmap_min_addr)
-               return -ENOMEM;
-
-       if (flags & MAP_FIXED)
+       if (fixed)
                return addr;
 
-       /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
-               if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
-                               (!vma || addr + len <= vm_start_gap(vma)))
+               if (high_limit - len >= addr && addr >= mmap_min_addr &&
+                   (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
 
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = max(PAGE_SIZE, mmap_min_addr);
-       info.high_limit = mm->mmap_base;
+       info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
        info.align_mask = 0;
 
-       if (addr > DEFAULT_MAP_WINDOW)
-               info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
-
        addr = vm_unmapped_area(&info);
        if (!(addr & ~PAGE_MASK))
                return addr;