ia64/mm: enable ARCH_HAS_VM_GET_PAGE_PROT
Author:     Anshuman Khandual <anshuman.khandual@arm.com>
AuthorDate: Mon, 11 Jul 2022 07:05:53 +0000 (12:35 +0530)
Commit:     akpm <akpm@linux-foundation.org>
CommitDate: Mon, 18 Jul 2022 00:14:40 +0000 (17:14 -0700)
This enables ARCH_HAS_VM_GET_PAGE_PROT on the platform and exports the
standard vm_get_page_prot() implementation via DECLARE_VM_GET_PAGE_PROT,
which looks up a private and static protection_map[] array.  Subsequently
all the __SXXX and __PXXX macros, which are no longer needed, can be dropped.
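
For reference, DECLARE_VM_GET_PAGE_PROT is the generic helper introduced
earlier in this series in <linux/pgtable.h>; it expands to roughly the
following, so the architecture only has to supply protection_map[] (a sketch
of the generic macro, not part of this patch):

#define DECLARE_VM_GET_PAGE_PROT					\
pgprot_t vm_get_page_prot(unsigned long vm_flags)			\
{									\
	return protection_map[vm_flags &				\
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];	\
}									\
EXPORT_SYMBOL(vm_get_page_prot);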

Link: https://lkml.kernel.org/r/20220711070600.2378316-20-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brian Cain <bcain@quicinc.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/ia64/Kconfig
arch/ia64/include/asm/pgtable.h
arch/ia64/mm/init.c

diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index cb93769a9f2ad3d65000fd72a2feae946fc265f8..0510a57377116b0b9a4f342bdc3b01fd9db87b7b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -12,6 +12,7 @@ config IA64
        select ARCH_HAS_DMA_MARK_CLEAN
        select ARCH_HAS_STRNCPY_FROM_USER
        select ARCH_HAS_STRNLEN_USER
+       select ARCH_HAS_VM_GET_PAGE_PROT
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_MIGHT_HAVE_PC_SERIO
        select ACPI
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 7aa8f2330fb19e8b970b4051f0517adf67be5bba..6925e28ae61d192ff6bfa102c9427daf5f78ce49 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
  * attempts to write to the page.
  */
        /* xwr */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_READONLY   /* write to priv pg -> copy & make writable */
-#define __P011 PAGE_READONLY   /* ditto */
-#define __P100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
-#define __P101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED     /* we don't have (and don't need) write-only */
-#define __S011 PAGE_SHARED
-#define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
-#define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
-#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
-
 #define pgd_ERROR(e)   printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
 #if CONFIG_PGTABLE_LEVELS == 4
 #define pud_ERROR(e)   printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 855d949d81dfea9e662988df91b44aaee3d37120..fc4e4217e87ff5a089c6d7b194a199fe23e71ca7 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -273,7 +273,7 @@ static int __init gate_vma_init(void)
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
-       gate_vma.vm_page_prot = __P101;
+       gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);
 
        return 0;
 }
@@ -490,3 +490,29 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
        __remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif
+
+static const pgprot_t protection_map[16] = {
+       [VM_NONE]                                       = PAGE_NONE,
+       [VM_READ]                                       = PAGE_READONLY,
+       [VM_WRITE]                                      = PAGE_READONLY,
+       [VM_WRITE | VM_READ]                            = PAGE_READONLY,
+       [VM_EXEC]                                       = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+                                                                  _PAGE_AR_X_RX),
+       [VM_EXEC | VM_READ]                             = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+                                                                  _PAGE_AR_RX),
+       [VM_EXEC | VM_WRITE]                            = PAGE_COPY_EXEC,
+       [VM_EXEC | VM_WRITE | VM_READ]                  = PAGE_COPY_EXEC,
+       [VM_SHARED]                                     = PAGE_NONE,
+       [VM_SHARED | VM_READ]                           = PAGE_READONLY,
+       [VM_SHARED | VM_WRITE]                          = PAGE_SHARED,
+       [VM_SHARED | VM_WRITE | VM_READ]                = PAGE_SHARED,
+       [VM_SHARED | VM_EXEC]                           = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+                                                                  _PAGE_AR_X_RX),
+       [VM_SHARED | VM_EXEC | VM_READ]                 = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+                                                                  _PAGE_AR_RX),
+       [VM_SHARED | VM_EXEC | VM_WRITE]                = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+                                                                  _PAGE_AR_RWX),
+       [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]      = __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+                                                                  _PAGE_AR_RWX)
+};
+DECLARE_VM_GET_PAGE_PROT
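
With the table in place, generic mm code keeps deriving a VMA's protection
through vm_get_page_prot(); a minimal sketch of the pre-existing caller in
mm/mmap.c, unchanged by this patch:

	vma->vm_page_prot = vm_get_page_prot(vm_flags);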