git.baikalelectronics.ru Git - kernel.git/commitdiff
arm/mm: enable ARCH_HAS_VM_GET_PAGE_PROT
authorAnshuman Khandual <anshuman.khandual@arm.com>
Mon, 11 Jul 2022 07:05:57 +0000 (12:35 +0530)
committerakpm <akpm@linux-foundation.org>
Mon, 18 Jul 2022 00:14:41 +0000 (17:14 -0700)
This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports
standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT,
which looks up a private and static protection_map[] array.  Subsequently
all __SXXX and __PXXX macros can be dropped which are no longer needed.

Link: https://lkml.kernel.org/r/20220711070600.2378316-24-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brian Cain <bcain@quicinc.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Rich Felker <dalias@libc.org>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/arm/Kconfig
arch/arm/include/asm/pgtable.h
arch/arm/lib/uaccess_with_memcpy.c
arch/arm/mm/mmu.c

index 7630ba9cb6ccc30cd75672c57e0aa248f5a298c9..e153b6d4fc5be2cae605459177b294b29191480d 100644 (file)
@@ -24,6 +24,7 @@ config ARM
        select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU
        select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+       select ARCH_HAS_VM_GET_PAGE_PROT
        select ARCH_HAVE_CUSTOM_GPIO_H
        select ARCH_HAVE_NMI_SAFE_CMPXCHG if CPU_V7 || CPU_V7M || CPU_V6K
        select ARCH_HAS_GCOV_PROFILE_ALL
index cd1f84bb40aea0ca6c5382ad2411e93fc394a0cc..78a532068fec2ccb083886afba64d665e8441206 100644 (file)
@@ -137,23 +137,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
  *  2) If we could do execute protection, then read is implied
  *  3) write implies read permissions
  */
-#define __P000  __PAGE_NONE
-#define __P001  __PAGE_READONLY
-#define __P010  __PAGE_COPY
-#define __P011  __PAGE_COPY
-#define __P100  __PAGE_READONLY_EXEC
-#define __P101  __PAGE_READONLY_EXEC
-#define __P110  __PAGE_COPY_EXEC
-#define __P111  __PAGE_COPY_EXEC
-
-#define __S000  __PAGE_NONE
-#define __S001  __PAGE_READONLY
-#define __S010  __PAGE_SHARED
-#define __S011  __PAGE_SHARED
-#define __S100  __PAGE_READONLY_EXEC
-#define __S101  __PAGE_READONLY_EXEC
-#define __S110  __PAGE_SHARED_EXEC
-#define __S111  __PAGE_SHARED_EXEC
 
 #ifndef __ASSEMBLY__
 /*
index c30b689bec2e981851d70f7aefc18e09d5d4b8eb..14eecaaf295fabacb68ac687912eb8f8c1da7a46 100644 (file)
@@ -237,7 +237,7 @@ static int __init test_size_treshold(void)
        if (!dst_page)
                goto no_dst;
        kernel_ptr = page_address(src_page);
-       user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
+       user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__PAGE_COPY));
        if (!user_ptr)
                goto no_vmap;
 
index 5e2be37a198e29eefa1b0a3ce6b64296c4d0e2f6..2722abddd7259b0131a50626370fed86ddae7bbc 100644 (file)
@@ -405,6 +405,26 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
        local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
 }
 
+static pgprot_t protection_map[16] __ro_after_init = {
+       [VM_NONE]                                       = __PAGE_NONE,
+       [VM_READ]                                       = __PAGE_READONLY,
+       [VM_WRITE]                                      = __PAGE_COPY,
+       [VM_WRITE | VM_READ]                            = __PAGE_COPY,
+       [VM_EXEC]                                       = __PAGE_READONLY_EXEC,
+       [VM_EXEC | VM_READ]                             = __PAGE_READONLY_EXEC,
+       [VM_EXEC | VM_WRITE]                            = __PAGE_COPY_EXEC,
+       [VM_EXEC | VM_WRITE | VM_READ]                  = __PAGE_COPY_EXEC,
+       [VM_SHARED]                                     = __PAGE_NONE,
+       [VM_SHARED | VM_READ]                           = __PAGE_READONLY,
+       [VM_SHARED | VM_WRITE]                          = __PAGE_SHARED,
+       [VM_SHARED | VM_WRITE | VM_READ]                = __PAGE_SHARED,
+       [VM_SHARED | VM_EXEC]                           = __PAGE_READONLY_EXEC,
+       [VM_SHARED | VM_EXEC | VM_READ]                 = __PAGE_READONLY_EXEC,
+       [VM_SHARED | VM_EXEC | VM_WRITE]                = __PAGE_SHARED_EXEC,
+       [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]      = __PAGE_SHARED_EXEC
+};
+DECLARE_VM_GET_PAGE_PROT
+
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */