powerpc/64s: mm_context.addr_limit is only used on hash
author Nicholas Piggin <npiggin@gmail.com>
Thu, 9 Nov 2017 17:27:40 +0000 (04:27 +1100)
committer Michael Ellerman <mpe@ellerman.id.au>
Mon, 13 Nov 2017 12:35:43 +0000 (23:35 +1100)
Radix keeps no meaningful state in addr_limit, so remove it from radix
code and rename to slb_addr_limit to make it clear it applies to hash
only.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
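
For context, here is a minimal sketch of the access pattern the rename implies: hash-only code reads slb_addr_limit, while radix never initializes it and keeps using TASK_SIZE-derived limits. This is illustrative only and not part of the commit; the helper name example_user_va_limit() is invented, and it assumes CONFIG_PPC_MM_SLICES plus the kernel-internal radix_enabled(), TASK_SIZE and VM_BUG_ON() definitions.

        /* Illustrative sketch only -- not from this commit. */
        static inline unsigned long example_user_va_limit(struct mm_struct *mm)
        {
                if (radix_enabled())
                        return TASK_SIZE;       /* radix keeps no SLB address limit */

                /* Hash: the limit must have been set up, as copy_mm_to_paca() asserts. */
                VM_BUG_ON(!mm->context.slb_addr_limit);
                return mm->context.slb_addr_limit;
        }

The same split is visible in the diff below: setup_arch() now sets init_mm.context.slb_addr_limit only when !radix_enabled(), and slice_get_unmapped_area() checks VM_BUG_ON(radix_enabled()) before relying on it.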
arch/powerpc/include/asm/book3s/64/mmu-hash.h
arch/powerpc/include/asm/book3s/64/mmu.h
arch/powerpc/include/asm/paca.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/paca.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/mm/hugetlbpage-radix.c
arch/powerpc/mm/mmap.c
arch/powerpc/mm/mmu_context_book3s64.c
arch/powerpc/mm/slb_low.S
arch/powerpc/mm/slice.c

diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 508275bb05d51b2908d7549acb71bc0978e901c8..e91e115a816f1685479e1173a3156676b9402d69 100644
@@ -606,7 +606,7 @@ extern void slb_set_size(u16 size);
 
 /* 4 bits per slice and we have one slice per 1TB */
 #define SLICE_ARRAY_SIZE       (H_PGTABLE_RANGE >> 41)
-#define TASK_SLICE_ARRAY_SZ(x) ((x)->context.addr_limit >> 41)
+#define TASK_SLICE_ARRAY_SZ(x) ((x)->context.slb_addr_limit >> 41)
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index c3b00e8ff79103876b3368a5cf3b4d708c579bf8..49a07c5d9e50daede9d96a595e9b6089d44a823a 100644
@@ -92,7 +92,7 @@ typedef struct {
 #ifdef CONFIG_PPC_MM_SLICES
        u64 low_slices_psize;   /* SLB page size encodings */
        unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
-       unsigned long addr_limit;
+       unsigned long slb_addr_limit;
 #else
        u16 sllp;               /* SLB page size encoding */
 #endif
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index c907ae23c9563b3737511cf30b2b11b510b8f02e..3892db93b8374e1f1256a6b497d384f3faedc06e 100644
@@ -143,7 +143,7 @@ struct paca_struct {
 #ifdef CONFIG_PPC_MM_SLICES
        u64 mm_ctx_low_slices_psize;
        unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
-       unsigned long addr_limit;
+       unsigned long mm_ctx_slb_addr_limit;
 #else
        u16 mm_ctx_user_psize;
        u16 mm_ctx_sllp;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 200623e71474b15d5a1ac3a9d05c2c67f84bd812..9aace433491adf30ebfaaad68fac9723f5585e5f 100644
@@ -185,7 +185,7 @@ int main(void)
 #ifdef CONFIG_PPC_MM_SLICES
        OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize);
        OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize);
-       DEFINE(PACA_ADDR_LIMIT, offsetof(struct paca_struct, addr_limit));
+       OFFSET(PACA_SLB_ADDR_LIMIT, paca_struct, mm_ctx_slb_addr_limit);
        DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
 #endif /* CONFIG_PPC_MM_SLICES */
 #endif
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 5d38d5ea9a2416c9aedddcdf52e01fdba155a8c5..d6597038931dc8a6bd65c55101339f0a63c39da5 100644
@@ -262,8 +262,8 @@ void copy_mm_to_paca(struct mm_struct *mm)
 
        get_paca()->mm_ctx_id = context->id;
 #ifdef CONFIG_PPC_MM_SLICES
-       VM_BUG_ON(!mm->context.addr_limit);
-       get_paca()->addr_limit = mm->context.addr_limit;
+       VM_BUG_ON(!mm->context.slb_addr_limit);
+       get_paca()->mm_ctx_slb_addr_limit = mm->context.slb_addr_limit;
        get_paca()->mm_ctx_low_slices_psize = context->low_slices_psize;
        memcpy(&get_paca()->mm_ctx_high_slices_psize,
               &context->high_slices_psize, TASK_SLICE_ARRAY_SZ(mm));
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index fa661ed616f5c4d6de0c339f7bbab5ed81063a0f..2075322cd22522edf7de78e32ef2679fae8e9913 100644
@@ -898,7 +898,8 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_PPC_MM_SLICES
 #ifdef CONFIG_PPC64
-       init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
+       if (!radix_enabled())
+               init_mm.context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
 #else
 #error "context.addr_limit not initialized."
 #endif
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 0a3d71aae175a4a168b00ac2b845a5a11b9c6448..b54b581a2f7da082103a84860fd6369761656501 100644
@@ -60,16 +60,10 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                return -EINVAL;
        if (len > high_limit)
                return -ENOMEM;
+
        if (fixed) {
                if (addr > high_limit - len)
                        return -ENOMEM;
-       }
-
-       if (unlikely(addr > mm->context.addr_limit &&
-                    mm->context.addr_limit != TASK_SIZE))
-               mm->context.addr_limit = TASK_SIZE;
-
-       if (fixed) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 6d476a7b56112bf0fe985a39bd0ea8585cbd60b6..d503f344e476e497912def22e544d56a88b65567 100644
@@ -116,17 +116,12 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
        if (len > high_limit)
                return -ENOMEM;
+
        if (fixed) {
                if (addr > high_limit - len)
                        return -ENOMEM;
-       }
-
-       if (unlikely(addr > mm->context.addr_limit &&
-                    mm->context.addr_limit != TASK_SIZE))
-               mm->context.addr_limit = TASK_SIZE;
-
-       if (fixed)
                return addr;
+       }
 
        if (addr) {
                addr = PAGE_ALIGN(addr);
@@ -165,17 +160,12 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
 
        if (len > high_limit)
                return -ENOMEM;
+
        if (fixed) {
                if (addr > high_limit - len)
                        return -ENOMEM;
-       }
-
-       if (unlikely(addr > mm->context.addr_limit &&
-                    mm->context.addr_limit != TASK_SIZE))
-               mm->context.addr_limit = TASK_SIZE;
-
-       if (fixed)
                return addr;
+       }
 
        if (addr) {
                addr = PAGE_ALIGN(addr);
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 846cbad45fcef2f918bdfdce191b909234a23380..5e193e444ee888eca618516cc21c02c270eb3d39 100644
@@ -96,8 +96,8 @@ static int hash__init_new_context(struct mm_struct *mm)
         * In the case of exec, use the default limit,
         * otherwise inherit it from the mm we are duplicating.
         */
-       if (!mm->context.addr_limit)
-               mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
+       if (!mm->context.slb_addr_limit)
+               mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
 
        /*
         * The old code would re-promote on fork, we don't do that when using
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index ed60ad861dfa45b60bf1c449acd80717434e12e1..2cf5ef3fc50dbfdc7207a1a0399682716ce8cf77 100644
@@ -167,7 +167,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
         /*
          * user space make sure we are within the allowed limit
         */
-       ld      r11,PACA_ADDR_LIMIT(r13)
+       ld      r11,PACA_SLB_ADDR_LIMIT(r13)
        cmpld   r3,r11
        bge-    8f
 
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index a4f93699194b6ab7c71ca00374efa94d35d816ef..564fff06f5c11ed32bd1ef97b1b9ad521e9cd9a3 100644
@@ -96,7 +96,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 {
        struct vm_area_struct *vma;
 
-       if ((mm->context.addr_limit - len) < addr)
+       if ((mm->context.slb_addr_limit - len) < addr)
                return 0;
        vma = find_vma(mm, addr);
        return (!vma || (addr + len) <= vm_start_gap(vma));
@@ -133,10 +133,10 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
                if (!slice_low_has_vma(mm, i))
                        ret->low_slices |= 1u << i;
 
-       if (mm->context.addr_limit <= SLICE_LOW_TOP)
+       if (mm->context.slb_addr_limit <= SLICE_LOW_TOP)
                return;
 
-       for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++)
+       for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++)
                if (!slice_high_has_vma(mm, i))
                        __set_bit(i, ret->high_slices);
 }
@@ -157,7 +157,7 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
                        ret->low_slices |= 1u << i;
 
        hpsizes = mm->context.high_slices_psize;
-       for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
+       for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
                mask_index = i & 0x1;
                index = i >> 1;
                if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
@@ -169,7 +169,7 @@ static int slice_check_fit(struct mm_struct *mm,
                           struct slice_mask mask, struct slice_mask available)
 {
        DECLARE_BITMAP(result, SLICE_NUM_HIGH);
-       unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.addr_limit);
+       unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);
 
        bitmap_and(result, mask.high_slices,
                   available.high_slices, slice_count);
@@ -219,7 +219,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
        mm->context.low_slices_psize = lpsizes;
 
        hpsizes = mm->context.high_slices_psize;
-       for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
+       for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
                mask_index = i & 0x1;
                index = i >> 1;
                if (test_bit(i, mask.high_slices))
@@ -329,8 +329,8 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
         * Only for that request for which high_limit is above
         * DEFAULT_MAP_WINDOW we should apply this.
         */
-       if (high_limit  > DEFAULT_MAP_WINDOW)
-               addr += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
+       if (high_limit > DEFAULT_MAP_WINDOW)
+               addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;
 
        while (addr > PAGE_SIZE) {
                info.high_limit = addr;
@@ -432,8 +432,8 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
                        return -ENOMEM;
        }
 
-       if (high_limit > mm->context.addr_limit) {
-               mm->context.addr_limit = high_limit;
+       if (high_limit > mm->context.slb_addr_limit) {
+               mm->context.slb_addr_limit = high_limit;
                on_each_cpu(slice_flush_segments, mm, 1);
        }
 
@@ -452,7 +452,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 
        /* Sanity checks */
        BUG_ON(mm->task_size == 0);
-       BUG_ON(mm->context.addr_limit == 0);
+       BUG_ON(mm->context.slb_addr_limit == 0);
        VM_BUG_ON(radix_enabled());
 
        slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);