git.baikalelectronics.ru Git - kernel.git/commitdiff
ARM: implement IRQ stacks
author: Ard Biesheuvel <ardb@kernel.org>
Tue, 5 Oct 2021 07:15:40 +0000 (09:15 +0200)
committer: Ard Biesheuvel <ardb@kernel.org>
Fri, 3 Dec 2021 14:11:31 +0000 (15:11 +0100)
Now that we no longer rely on the stack pointer to access the current
task struct or thread info, we can implement support for IRQ stacks
cleanly as well.

Define a per-CPU IRQ stack and switch to this stack when taking an IRQ,
provided that we were not already using that stack in the interrupted
context. This is never the case for IRQs taken from user space, but ones
taken while running in the kernel could fire while one taken from user
space has not completed yet.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Keith Packard <keithpac@amazon.com>
Acked-by: Nick Desaulniers <ndesaulniers@google.com>
Tested-by: Marc Zyngier <maz@kernel.org>
Tested-by: Vladimir Murzin <vladimir.murzin@arm.com> # ARMv7M
arch/arm/Kconfig
arch/arm/include/asm/assembler.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/irq.c
arch/arm/kernel/traps.c
arch/arm/lib/backtrace-clang.S
arch/arm/lib/backtrace.S

index 4e301e4d2da6b187ec3a2990d34647f612bd7f68..b11846679cc69c3b10963524a858597613be88d3 100644 (file)
@@ -1166,6 +1166,10 @@ config CURRENT_POINTER_IN_TPIDRURO
        def_bool y
        depends on SMP && CPU_32v6K && !CPU_V6
 
+config IRQSTACKS
+       def_bool y
+       depends on GENERIC_IRQ_MULTI_HANDLER && THREAD_INFO_IN_TASK
+
 config ARM_CPU_TOPOLOGY
        bool "Support cpu topology definition"
        depends on SMP && CPU_V7
index 870bfaea43182bb072a99a99251fdd56d9ec3e23..1b9d4df331aa1fe81c8b82cf73a7bf88b81ab254 100644 (file)
 
 #define IMM12_MASK 0xfff
 
+/* the frame pointer used for stack unwinding */
+ARM(   fpreg   .req    r11     )
+THUMB( fpreg   .req    r7      )
+
 /*
  * Enable and disable interrupts
  */
index deff286eb5ea00486e1b18a01696fa704d67f728..1c7590eef7126f49ef0549def750253c03183f67 100644 (file)
 /*
  * Interrupt handling.
  */
-       .macro  irq_handler
+       .macro  irq_handler, from_user:req
 #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
        mov     r0, sp
+#ifdef CONFIG_IRQSTACKS
+       mov_l   r2, irq_stack_ptr       @ Take base address
+       mrc     p15, 0, r3, c13, c0, 4  @ Get CPU offset
+#ifdef CONFIG_UNWINDER_ARM
+       mov     fpreg, sp               @ Preserve original SP
+#else
+       mov     r8, fp                  @ Preserve original FP
+       mov     r9, sp                  @ Preserve original SP
+#endif
+       ldr     sp, [r2, r3]            @ Load SP from per-CPU var
+       .if     \from_user == 0
+UNWIND(        .setfp  fpreg, sp               )
+       @
+       @ If we took the interrupt while running in the kernel, we may already
+       @ be using the IRQ stack, so revert to the original value in that case.
+       @
+       subs    r2, sp, r0              @ SP above bottom of IRQ stack?
+       rsbscs  r2, r2, #THREAD_SIZE    @ ... and below the top?
+       movcs   sp, r0                  @ If so, revert to incoming SP
+
+#ifndef CONFIG_UNWINDER_ARM
+       @
+       @ Inform the frame pointer unwinder where the next frame lives
+       @
+       movcc   lr, pc                  @ Make LR point into .entry.text so
+                                       @ that we will get a dump of the
+                                       @ exception stack for this frame.
+#ifdef CONFIG_CC_IS_GCC
+       movcc   ip, r0                  @ Store the old SP in the frame record.
+       stmdbcc sp!, {fp, ip, lr, pc}   @ Push frame record
+       addcc   fp, sp, #12
+#else
+       stmdbcc sp!, {fp, lr}           @ Push frame record
+       movcc   fp, sp
+#endif // CONFIG_CC_IS_GCC
+#endif // CONFIG_UNWINDER_ARM
+       .endif
+#endif // CONFIG_IRQSTACKS
+
        bl      generic_handle_arch_irq
+
+#ifdef CONFIG_IRQSTACKS
+#ifdef CONFIG_UNWINDER_ARM
+       mov     sp, fpreg               @ Restore original SP
+#else
+       mov     fp, r8                  @ Restore original FP
+       mov     sp, r9                  @ Restore original SP
+#endif // CONFIG_UNWINDER_ARM
+#endif // CONFIG_IRQSTACKS
 #else
        arch_irq_handler_default
 #endif
@@ -199,7 +247,7 @@ ENDPROC(__dabt_svc)
        .align  5
 __irq_svc:
        svc_entry
-       irq_handler
+       irq_handler from_user=0
 
 #ifdef CONFIG_PREEMPTION
        ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
@@ -426,7 +474,7 @@ ENDPROC(__dabt_usr)
 __irq_usr:
        usr_entry
        kuser_cmpxchg_check
-       irq_handler
+       irq_handler from_user=1
        get_thread_info tsk
        mov     why, #0
        b       ret_to_user_from_irq
index b79975bd988ca4bd9703ee1115beac16c842f2cb..abb0aa679bbaf7511f068806894fe63c5996ce81 100644 (file)
 
 unsigned long irq_err_count;
 
+#ifdef CONFIG_IRQSTACKS
+
+asmlinkage DEFINE_PER_CPU_READ_MOSTLY(u8 *, irq_stack_ptr);
+
+static void __init init_irq_stacks(void)
+{
+       u8 *stack;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               stack = (u8 *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+               if (WARN_ON(!stack))
+                       break;
+               per_cpu(irq_stack_ptr, cpu) = &stack[THREAD_SIZE];
+       }
+}
+
+#endif
+
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
 #ifdef CONFIG_FIQ
@@ -101,6 +120,10 @@ void __init init_IRQ(void)
 {
        int ret;
 
+#ifdef CONFIG_IRQSTACKS
+       init_irq_stacks();
+#endif
+
        if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq)
                irqchip_init();
        else
index 89be21ec3b521b123afea5be20f1f66ce49faf7f..b42c446cec9af9dd59a74605ba33caacfae22ba6 100644 (file)
@@ -66,6 +66,19 @@ void dump_backtrace_entry(unsigned long where, unsigned long from,
 {
        unsigned long end = frame + 4 + sizeof(struct pt_regs);
 
+       if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER) &&
+           IS_ENABLED(CONFIG_CC_IS_GCC) &&
+           end > ALIGN(frame, THREAD_SIZE)) {
+               /*
+                * If we are walking past the end of the stack, it may be due
+                * to the fact that we are on an IRQ or overflow stack. In this
+                * case, we can load the address of the other stack from the
+                * frame record.
+                */
+               frame = ((unsigned long *)frame)[-2] - 4;
+               end = frame + 4 + sizeof(struct pt_regs);
+       }
+
 #ifdef CONFIG_KALLSYMS
        printk("%s[<%08lx>] (%ps) from [<%08lx>] (%pS)\n",
                loglvl, where, (void *)where, from, (void *)from);
@@ -278,7 +291,7 @@ static int __die(const char *str, int err, struct pt_regs *regs)
 
        if (!user_mode(regs) || in_interrupt()) {
                dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
-                        THREAD_SIZE + (unsigned long)task_stack_page(tsk));
+                        ALIGN(regs->ARM_sp, THREAD_SIZE));
                dump_backtrace(regs, tsk, KERN_EMERG);
                dump_instr(KERN_EMERG, regs);
        }
index 5b4bca85d06dbb912e6493545d50c0db52834be2..290c52a60fc65878a9304ca68c00deae8d185a8f 100644 (file)
@@ -197,6 +197,14 @@ finished_setup:
 
                cmp     sv_fp, frame            @ next frame must be
                mov     frame, sv_fp            @ above the current frame
+#ifdef CONFIG_IRQSTACKS
+               @
+               @ Kernel stacks may be discontiguous in memory. If the next
+               @ frame is below the previous frame, accept it as long as it
+               @ lives in kernel memory.
+               @
+               cmpls   sv_fp, #PAGE_OFFSET
+#endif
                bhi     for_each_frame
 
 1006:          adr     r0, .Lbad
index e8408f22d4dc97af0b0be3532dbdaf7cba08988e..293a2716bd2047b8c2f57a3ea5863c5f31ac06f9 100644 (file)
@@ -98,6 +98,14 @@ for_each_frame:      tst     frame, mask             @ Check for address exceptions
 
                cmp     sv_fp, frame            @ next frame must be
                mov     frame, sv_fp            @ above the current frame
+#ifdef CONFIG_IRQSTACKS
+               @
+               @ Kernel stacks may be discontiguous in memory. If the next
+               @ frame is below the previous frame, accept it as long as it
+               @ lives in kernel memory.
+               @
+               cmpls   sv_fp, #PAGE_OFFSET
+#endif
                bhi     for_each_frame
 
 1006:          adr     r0, .Lbad