LoongArch: Add STACKTRACE support
author     Qing Zhang <zhangqing@loongson.cn>
Sat, 6 Aug 2022 08:10:04 +0000 (16:10 +0800)
committer  Huacai Chen <chenhuacai@loongson.cn>
Fri, 12 Aug 2022 05:10:11 +0000 (13:10 +0800)
1. Use the common arch_stack_walk() infrastructure to avoid duplicated code
   and to avoid managing stack storage and entry filtering by hand.
2. Add sched_ra (the __schedule() return address) and sched_cfa (the
   __schedule() call frame address) to struct thread_struct, and save them
   in switch_to().
3. Add an __get_wchan() implementation.

Now we can print a process's stack and wait channel via
cat /proc/*/stack and cat /proc/*/wchan.
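
For reference, this is roughly how the generic ARCH_STACKWALK code in
kernel/stacktrace.c drives the new hook for /proc/*/stack; quoted here as a
condensed sketch for review, not as part of this patch:

    #include <linux/sched.h>
    #include <linux/sched/debug.h>       /* in_sched_functions() */
    #include <linux/sched/task_stack.h>  /* try_get_task_stack() */
    #include <linux/stacktrace.h>

    struct stacktrace_cookie {
            unsigned long   *store;
            unsigned int    size;
            unsigned int    skip;
            unsigned int    len;
    };

    static bool stack_trace_consume_entry(void *cookie, unsigned long addr)
    {
            struct stacktrace_cookie *c = cookie;

            if (c->len >= c->size)
                    return false;
            if (c->skip > 0) {
                    c->skip--;              /* drop uninteresting top frames */
                    return true;
            }
            c->store[c->len++] = addr;
            return c->len < c->size;        /* stop the walk once full */
    }

    static bool stack_trace_consume_entry_nosched(void *cookie, unsigned long addr)
    {
            if (in_sched_functions(addr))   /* hide scheduler internals */
                    return true;
            return stack_trace_consume_entry(cookie, addr);
    }

    unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store,
                                      unsigned int size, unsigned int skipnr)
    {
            struct stacktrace_cookie c = {
                    .store  = store,
                    .size   = size,
                    .skip   = skipnr + (current == tsk),
            };

            if (!try_get_task_stack(tsk))
                    return 0;

            arch_stack_walk(stack_trace_consume_entry_nosched, &c, tsk, NULL);
            put_task_stack(tsk);
            return c.len;
    }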

Signed-off-by: Qing Zhang <zhangqing@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
arch/loongarch/Kconfig
arch/loongarch/include/asm/processor.h
arch/loongarch/include/asm/switch_to.h
arch/loongarch/kernel/Makefile
arch/loongarch/kernel/asm-offsets.c
arch/loongarch/kernel/process.c
arch/loongarch/kernel/stacktrace.c [new file with mode: 0644]
arch/loongarch/kernel/switch.S

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 5b4f7bdf69faa06ae3d691acc083429641aad959..947cb633744b8e3f8634ed30eda71e9ad3bebac0 100644
@@ -42,6 +42,7 @@ config LOONGARCH
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_MIGHT_HAVE_PC_SERIO
        select ARCH_SPARSEMEM_ENABLE
+       select ARCH_STACKWALK
        select ARCH_SUPPORTS_ACPI
        select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_SUPPORTS_HUGETLBFS
@@ -151,6 +152,10 @@ config LOCKDEP_SUPPORT
        bool
        default y
 
+config STACKTRACE_SUPPORT
+       bool
+       default y
+
# MACH_LOONGSON32 and MACH_LOONGSON64 are deliberately carried over from the
 # MIPS Loongson code, to preserve Loongson-specific code paths in drivers that
 # are shared between architectures, and specifically expecting the symbols.
diff --git a/arch/loongarch/include/asm/processor.h b/arch/loongarch/include/asm/processor.h
index 57ec45aa078ec06f0e04a97a80cbe850a1e18c26..1c4b4308378d43424aa31d4df2b7c9cd239747f5 100644
@@ -101,6 +101,10 @@ struct thread_struct {
        unsigned long reg23, reg24, reg25, reg26; /* s0-s3 */
        unsigned long reg27, reg28, reg29, reg30, reg31; /* s4-s8 */
 
+       /* __schedule() return address / call frame address */
+       unsigned long sched_ra;
+       unsigned long sched_cfa;
+
        /* CSR registers */
        unsigned long csr_prmd;
        unsigned long csr_crmd;
@@ -129,6 +133,9 @@ struct thread_struct {
        struct loongarch_fpu fpu FPU_ALIGN;
 };
 
+#define thread_saved_ra(tsk)   (tsk->thread.sched_ra)
+#define thread_saved_fp(tsk)   (tsk->thread.sched_cfa)
+
 #define INIT_THREAD  {                                         \
        /*                                                      \
         * Main processor registers                             \
@@ -145,6 +152,8 @@ struct thread_struct {
        .reg29                  = 0,                            \
        .reg30                  = 0,                            \
        .reg31                  = 0,                            \
+       .sched_ra               = 0,                            \
+       .sched_cfa              = 0,                            \
        .csr_crmd               = 0,                            \
        .csr_prmd               = 0,                            \
        .csr_euen               = 0,                            \
diff --git a/arch/loongarch/include/asm/switch_to.h b/arch/loongarch/include/asm/switch_to.h
index 2a8d043755742ddaead4d2b527b8850905fce15d..43a5ab162d38b917781a14acf3e3da92411d8a50 100644
@@ -15,12 +15,15 @@ struct task_struct;
  * @prev:      The task previously executed.
  * @next:      The task to begin executing.
  * @next_ti:   task_thread_info(next).
+ * @sched_ra:  __schedule return address.
+ * @sched_cfa: __schedule call frame address.
  *
  * This function is used whilst scheduling to save the context of prev & load
  * the context of next. Returns prev.
  */
 extern asmlinkage struct task_struct *__switch_to(struct task_struct *prev,
-                       struct task_struct *next, struct thread_info *next_ti);
+                       struct task_struct *next, struct thread_info *next_ti,
+                       void *sched_ra, void *sched_cfa);
 
 /*
  * For newly created kernel threads switch_to() will return to
@@ -28,10 +31,11 @@ extern asmlinkage struct task_struct *__switch_to(struct task_struct *prev,
  * That is, everything following __switch_to() will be skipped for new threads.
  * So everything that matters to new threads should be placed before __switch_to().
  */
-#define switch_to(prev, next, last)                                    \
-do {                                                                   \
-       lose_fpu_inatomic(1, prev);                                     \
-       (last) = __switch_to(prev, next, task_thread_info(next));       \
+#define switch_to(prev, next, last)                                            \
+do {                                                                           \
+       lose_fpu_inatomic(1, prev);                                             \
+       (last) = __switch_to(prev, next, task_thread_info(next),                \
+                __builtin_return_address(0), __builtin_frame_address(0));      \
 } while (0)
 
 #endif /* _ASM_SWITCH_TO_H */
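
Note that switch_to() must remain a macro for this to work: the two builtins
are evaluated in the frame of the function that expands it (the scheduler's
context_switch()/__schedule() path), so prev's sched_ra/sched_cfa record
exactly where that task will resume when it is switched back in. A minimal
user-space illustration of the builtin semantics (illustrative only, not
kernel code):

    #include <stdio.h>

    /* Like switch_to(), a macro evaluates the builtins in its caller. */
    #define record_site(ra, cfa)                            \
    do {                                                    \
            *(ra)  = __builtin_return_address(0);           \
            *(cfa) = __builtin_frame_address(0);            \
    } while (0)

    static void __attribute__((noinline)) schedule_like(void)
    {
            void *ra, *cfa;

            record_site(&ra, &cfa);
            /* ra: schedule_like()'s own return site, cfa: its frame */
            printf("ra=%p cfa=%p\n", ra, cfa);
    }

    int main(void)
    {
            schedule_like();
            return 0;
    }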
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index 918600e7b30f768246bbf5eb9f1fe09d41e5850e..e5be17009fe8a4ba456bf2644888811d49ef4b93 100644
@@ -15,6 +15,7 @@ obj-$(CONFIG_EFI)             += efi.o
 obj-$(CONFIG_CPU_HAS_FPU)      += fpu.o
 
 obj-$(CONFIG_MODULES)          += module.o module-sections.o
+obj-$(CONFIG_STACKTRACE)       += stacktrace.o
 
 obj-$(CONFIG_PROC_FS)          += proc.o
 
diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c
index 20cd9e16a95abb7b7677e2cfd8dc3f34212ae58d..eb350f3ffae51ee3398fd88874fb5a5e847419b5 100644
@@ -103,6 +103,8 @@ void output_thread_defines(void)
        OFFSET(THREAD_REG29, task_struct, thread.reg29);
        OFFSET(THREAD_REG30, task_struct, thread.reg30);
        OFFSET(THREAD_REG31, task_struct, thread.reg31);
+       OFFSET(THREAD_SCHED_RA, task_struct, thread.sched_ra);
+       OFFSET(THREAD_SCHED_CFA, task_struct, thread.sched_cfa);
        OFFSET(THREAD_CSRCRMD, task_struct,
               thread.csr_crmd);
        OFFSET(THREAD_CSRPRMD, task_struct,
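
(asm-offsets.c runs at build time; the OFFSET() helper from
include/linux/kbuild.h emits each structure offset as a magic assembler
string that kbuild post-processes into an asm-offsets.h constant, roughly:)

    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))

    /*
     * So THREAD_SCHED_RA/THREAD_SCHED_CFA resolve to
     * offsetof(struct task_struct, thread.sched_ra/.sched_cfa), which
     * switch.S below uses as immediate offsets for stptr.d.
     */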
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index 839f0e9631520ed0456ef21aabd8612b00ef93ce..660492f064e7e4584fa2f04088506fc8ed7076f2 100644
@@ -135,6 +135,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
        childregs = (struct pt_regs *) childksp - 1;
        /*  Put the stack after the struct pt_regs.  */
        childksp = (unsigned long) childregs;
+       p->thread.sched_cfa = 0;
        p->thread.csr_euen = 0;
        p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
        p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
@@ -145,6 +146,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
                p->thread.reg23 = (unsigned long)args->fn;
                p->thread.reg24 = (unsigned long)args->fn_arg;
                p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
+               p->thread.sched_ra = (unsigned long)ret_from_kernel_thread;
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->csr_euen = p->thread.csr_euen;
                childregs->csr_crmd = p->thread.csr_crmd;
@@ -161,6 +163,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 
        p->thread.reg03 = (unsigned long) childregs;
        p->thread.reg01 = (unsigned long) ret_from_fork;
+       p->thread.sched_ra = (unsigned long) ret_from_fork;
 
        /*
         * New tasks lose permission to use the fpu. This accelerates context
@@ -181,7 +184,31 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 
 unsigned long __get_wchan(struct task_struct *task)
 {
-       return 0;
+       unsigned long pc = 0;
+       struct unwind_state state;
+
+       if (!try_get_task_stack(task))
+               return 0;
+
+       unwind_start(&state, task, NULL);
+       state.sp = thread_saved_fp(task);
+       get_stack_info(state.sp, state.task, &state.stack_info);
+       state.pc = thread_saved_ra(task);
+#ifdef CONFIG_UNWINDER_PROLOGUE
+       state.type = UNWINDER_PROLOGUE;
+#endif
+       for (; !unwind_done(&state); unwind_next_frame(&state)) {
+               pc = unwind_get_return_address(&state);
+               if (!pc)
+                       break;
+               if (in_sched_functions(pc))
+                       continue;
+               break;
+       }
+
+       put_task_stack(task);
+
+       return pc;
 }
 
 bool in_irq_stack(unsigned long stack, struct stack_info *info)
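
(__get_wchan() is reached only via the generic get_wchan() wrapper in
kernel/sched/core.c, which makes sure the task is blocked and stays off the
runqueue while its stack is read; condensed here for reference:)

    unsigned long get_wchan(struct task_struct *p)
    {
            unsigned long ip = 0;
            unsigned int state;

            if (!p || p == current)
                    return 0;

            /* Only get wchan if task is blocked and we can keep it that way. */
            raw_spin_lock_irq(&p->pi_lock);
            state = READ_ONCE(p->__state);
            smp_rmb(); /* see try_to_wake_up() */
            if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
                    ip = __get_wchan(p);
            raw_spin_unlock_irq(&p->pi_lock);

            return ip;
    }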
diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c
new file mode 100644
index 0000000..e690c1c
--- /dev/null
+++ b/arch/loongarch/kernel/stacktrace.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Stack trace management functions
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+
+#include <asm/stacktrace.h>
+#include <asm/unwind.h>
+
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+                    struct task_struct *task, struct pt_regs *regs)
+{
+       unsigned long addr;
+       struct pt_regs dummyregs;
+       struct unwind_state state;
+
+       regs = &dummyregs;
+
+       if (task == current) {
+               regs->regs[3] = (unsigned long)__builtin_frame_address(0);
+               regs->csr_era = (unsigned long)__builtin_return_address(0);
+       } else {
+               regs->regs[3] = thread_saved_fp(task);
+               regs->csr_era = thread_saved_ra(task);
+       }
+
+       regs->regs[1] = 0;
+       for (unwind_start(&state, task, regs);
+             !unwind_done(&state); unwind_next_frame(&state)) {
+               addr = unwind_get_return_address(&state);
+               if (!addr || !consume_entry(cookie, addr))
+                       break;
+       }
+}
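
(With arch_stack_walk() in place, generic in-kernel users work unchanged.
For example, a debugging helper can capture and print the current task's
trace; dump_current_stack() below is a hypothetical sketch, not part of
this patch:)

    static void dump_current_stack(void)
    {
            unsigned long entries[16];
            unsigned int i, nr;

            nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
            for (i = 0; i < nr; i++)
                    pr_info("%pS\n", (void *)entries[i]);
    }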
diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S
index 37e84ac8ffc24d66ee6ff1855dad476fe3c80a22..43ebbc3990f73afd5fc05a357ec5882a0f63d48c 100644
@@ -21,6 +21,8 @@ SYM_FUNC_START(__switch_to)
 
        cpu_save_nonscratch a0
        stptr.d ra, a0, THREAD_REG01
+       stptr.d a3, a0, THREAD_SCHED_RA
+       stptr.d a4, a0, THREAD_SCHED_CFA
        move    tp, a2
        cpu_restore_nonscratch a1
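
(Per the LoongArch calling convention, a3 and a4 carry the new fourth and
fifth __switch_to() arguments, i.e. sched_ra and sched_cfa, so the two new
stptr.d instructions save prev's (a0's) resume point alongside its
callee-saved registers just before tp and the stack are switched over to
next.)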