#include <linux/init.h>
#include <linux/mm.h>
#include <linux/compat.h>
-#include <linux/hardirq.h>
+#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/kernel.h>
#include <linux/tracehook.h>
tile_show_stack(&kbt);
}
-/* To ensure stack dump on tiles occurs one by one. */
-static DEFINE_SPINLOCK(backtrace_lock);
-/* To ensure no backtrace occurs before all of the stack dump are done. */
-static atomic_t backtrace_cpus;
-/* The cpu mask to avoid reentrance. */
-static struct cpumask backtrace_mask;
-
-void do_nmi_dump_stack(struct pt_regs *regs)
-{
- int is_idle = is_idle_task(current) && !in_interrupt();
- int cpu;
-
- nmi_enter();
- cpu = smp_processor_id();
- if (WARN_ON_ONCE(!cpumask_test_and_clear_cpu(cpu, &backtrace_mask)))
- goto done;
-
- spin_lock(&backtrace_lock);
- if (is_idle)
- pr_info("CPU: %d idle\n", cpu);
- else
- show_regs(regs);
- spin_unlock(&backtrace_lock);
- atomic_dec(&backtrace_cpus);
-done:
- nmi_exit();
-}
-
#ifdef __tilegx__
-void arch_trigger_all_cpu_backtrace(bool self)
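+/*
+ * Raiser callback for the generic NMI backtrace machinery: send an NMI
+ * to every cpu still set in in_mask, asking each one to dump its stack.
+ */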
+void nmi_raise_cpu_backtrace(struct cpumask *in_mask)
{
struct cpumask mask;
HV_Coord tile;
unsigned int timeout;
int cpu;
- int ongoing;
HV_NMI_Info info[NR_CPUS];
- ongoing = atomic_cmpxchg(&backtrace_cpus, 0, num_online_cpus() - 1);
- if (ongoing != 0) {
- pr_err("Trying to do all-cpu backtrace.\n");
- pr_err("But another all-cpu backtrace is ongoing (%d cpus left)\n",
- ongoing);
- if (self) {
- pr_err("Reporting the stack on this cpu only.\n");
- dump_stack();
- }
- return;
- }
-
- cpumask_copy(&mask, cpu_online_mask);
- cpumask_clear_cpu(smp_processor_id(), &mask);
- cpumask_copy(&backtrace_mask, &mask);
-
- /* Backtrace for myself first. */
- if (self)
- dump_stack();
-
/* Tentatively dump stack on remote tiles via NMI. */
timeout = 100;
+ cpumask_copy(&mask, in_mask);
while (!cpumask_empty(&mask) && timeout) {
for_each_cpu(cpu, &mask) {
tile.x = cpu_x(cpu);
tile.y = cpu_y(cpu);
info[cpu] = hv_send_nmi(tile, TILE_NMI_DUMP_STACK, 0);
if (info[cpu].result == HV_NMI_RESULT_OK)
cpumask_clear_cpu(cpu, &mask);
}
mdelay(10);
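+ /* We may spin here for up to 100 * 10ms; keep the watchdog quiet. */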
+ touch_softlockup_watchdog();
timeout--;
}
- /* Warn about cpus stuck in ICS and decrement their counts here. */
+ /* Warn about cpus stuck in ICS. */
if (!cpumask_empty(&mask)) {
for_each_cpu(cpu, &mask) {
+
+ /* Clear the bit as if nmi_cpu_backtrace() ran. */
+ cpumask_clear_cpu(cpu, in_mask);
+
switch (info[cpu].result) {
case HV_NMI_RESULT_FAIL_ICS:
pr_warn("Skipping stack dump of cpu %d in ICS at pc %#llx\n",
cpu, info[cpu].pc);
break;
case HV_NMI_RESULT_FAIL_HV:
pr_warn("Skipping stack dump of cpu %d in hypervisor\n",
cpu);
break;
case HV_ENOSYS:
- pr_warn("Hypervisor too old to allow remote stack dumps.\n");
- goto skip_for_each;
+ WARN_ONCE(1, "Hypervisor too old to allow remote stack dumps.\n");
+ break;
default: /* should not happen */
pr_warn("Skipping stack dump of cpu %d [%d,%#llx]\n",
cpu, info[cpu].result, info[cpu].pc);
break;
}
}
-skip_for_each:
- atomic_sub(cpumask_weight(&mask), &backtrace_cpus);
}
}
+
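+/*
+ * Entry point used by the generic code: nmi_trigger_cpumask_backtrace()
+ * handles the local dump, reentrancy protection, and waiting for the
+ * remote cpus, so all we supply is the NMI raiser above.
+ */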
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+{
+ nmi_trigger_cpumask_backtrace(mask, exclude_self,
+ nmi_raise_cpu_backtrace);
+}
#endif /* __tilegx__ */
#include <linux/reboot.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/nmi.h>
#include <asm/stack.h>
#include <asm/traps.h>
#include <asm/setup.h>
void do_nmi(struct pt_regs *regs, int fault_num, unsigned long reason)
{
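+ /* Mark NMI context for RCU and lockdep before dispatching. */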
+ nmi_enter();
switch (reason) {
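+ /* TILE_NMI_DUMP_STACK is only sent when a backtrace raiser exists. */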
+#ifdef arch_trigger_cpumask_backtrace
case TILE_NMI_DUMP_STACK:
- do_nmi_dump_stack(regs);
+ nmi_cpu_backtrace(regs);
break;
+#endif
default:
panic("Unexpected do_nmi type %ld", reason);
- return;
}
+ nmi_exit();
}
/* Deprecated function currently only used here. */