alpha/mm: enable ARCH_HAS_VM_GET_PAGE_PROT
author    Anshuman Khandual <anshuman.khandual@arm.com>
          Mon, 11 Jul 2022 07:05:48 +0000 (12:35 +0530)
committer akpm <akpm@linux-foundation.org>
          Mon, 18 Jul 2022 00:14:39 +0000 (17:14 -0700)
This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports the
standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT,
which looks up a private, static protection_map[] array.  Subsequently,
all __SXXX and __PXXX macros, which are no longer needed, can be dropped;
a sketch of the expansion follows below.
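
For reference, a minimal sketch of what DECLARE_VM_GET_PAGE_PROT roughly
expands to, assuming the generic helper added earlier in this series in
include/linux/pgtable.h (shown here for illustration only, not as part of
this patch):

    /* Sketch of the DECLARE_VM_GET_PAGE_PROT expansion (illustrative) */
    pgprot_t vm_get_page_prot(unsigned long vm_flags)
    {
            /*
             * Mask vm_flags down to the read/write/exec/shared bits and
             * index the per-arch protection_map[] defined in this patch.
             */
            return protection_map[vm_flags &
                    (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
    }
    EXPORT_SYMBOL(vm_get_page_prot);

With protection_map[] now private to arch/alpha/mm/init.c, this generic
lookup works unchanged while the __P/__S macro table in pgtable.h goes away.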

Link: https://lkml.kernel.org/r/20220711070600.2378316-15-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brian Cain <bcain@quicinc.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/alpha/Kconfig
arch/alpha/include/asm/pgtable.h
arch/alpha/mm/init.c

diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 7d0d26b5b3f526effa72dff37ef09ed08334ec1f..db1c8b329461359d3788667432f0b5138bf6a35d 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -2,6 +2,7 @@
 config ALPHA
        bool
        default y
+       select ARCH_HAS_VM_GET_PAGE_PROT
        select ARCH_32BIT_USTAT_F_TINODE
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_MIGHT_HAVE_PC_SERIO
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 170451fde043f2953f39ed1f04275062bedc75c5..3ea9661c09ffc10c969ea4059fd8ff4a27dd71fe 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -116,23 +116,6 @@ struct vm_area_struct;
  * arch/alpha/mm/fault.c)
  */
        /* xwr */
-#define __P000 _PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
-#define __P001 _PAGE_P(_PAGE_FOE | _PAGE_FOW)
-#define __P010 _PAGE_P(_PAGE_FOE)
-#define __P011 _PAGE_P(_PAGE_FOE)
-#define __P100 _PAGE_P(_PAGE_FOW | _PAGE_FOR)
-#define __P101 _PAGE_P(_PAGE_FOW)
-#define __P110 _PAGE_P(0)
-#define __P111 _PAGE_P(0)
-
-#define __S000 _PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
-#define __S001 _PAGE_S(_PAGE_FOE | _PAGE_FOW)
-#define __S010 _PAGE_S(_PAGE_FOE)
-#define __S011 _PAGE_S(_PAGE_FOE)
-#define __S100 _PAGE_S(_PAGE_FOW | _PAGE_FOR)
-#define __S101 _PAGE_S(_PAGE_FOW)
-#define __S110 _PAGE_S(0)
-#define __S111 _PAGE_S(0)
 
 /*
  * pgprot_noncached() is only for infiniband pci support, and a real
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 7511723b76693c1205ca32ed0c6c89ab31c4bffa..a155180d7a837be3ae49a929ab65a86a90d6743c 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -280,3 +280,25 @@ mem_init(void)
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
        memblock_free_all();
 }
+
+static const pgprot_t protection_map[16] = {
+       [VM_NONE]                                       = _PAGE_P(_PAGE_FOE | _PAGE_FOW |
+                                                                 _PAGE_FOR),
+       [VM_READ]                                       = _PAGE_P(_PAGE_FOE | _PAGE_FOW),
+       [VM_WRITE]                                      = _PAGE_P(_PAGE_FOE),
+       [VM_WRITE | VM_READ]                            = _PAGE_P(_PAGE_FOE),
+       [VM_EXEC]                                       = _PAGE_P(_PAGE_FOW | _PAGE_FOR),
+       [VM_EXEC | VM_READ]                             = _PAGE_P(_PAGE_FOW),
+       [VM_EXEC | VM_WRITE]                            = _PAGE_P(0),
+       [VM_EXEC | VM_WRITE | VM_READ]                  = _PAGE_P(0),
+       [VM_SHARED]                                     = _PAGE_S(_PAGE_FOE | _PAGE_FOW |
+                                                                 _PAGE_FOR),
+       [VM_SHARED | VM_READ]                           = _PAGE_S(_PAGE_FOE | _PAGE_FOW),
+       [VM_SHARED | VM_WRITE]                          = _PAGE_S(_PAGE_FOE),
+       [VM_SHARED | VM_WRITE | VM_READ]                = _PAGE_S(_PAGE_FOE),
+       [VM_SHARED | VM_EXEC]                           = _PAGE_S(_PAGE_FOW | _PAGE_FOR),
+       [VM_SHARED | VM_EXEC | VM_READ]                 = _PAGE_S(_PAGE_FOW),
+       [VM_SHARED | VM_EXEC | VM_WRITE]                = _PAGE_S(0),
+       [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]      = _PAGE_S(0)
+};
+DECLARE_VM_GET_PAGE_PROT