/*
 * NMI backtrace support
 *
 * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King,
 * with the following header:
 *
 *   HW NMI watchdog support
 *
 *   started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 *   Arch specific calls to support NMI watchdog
 *
 *   Bits copied from original nmi.c file
 */
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/cpu.h>

#ifdef arch_trigger_cpumask_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;

/* "in progress" flag of arch_trigger_cpumask_backtrace */
static unsigned long backtrace_flag;

/*
 * When raise() is called it will be passed a pointer to the
 * backtrace_mask. Architectures that call nmi_cpu_backtrace()
 * directly from their raise() functions may rely on the mask
 * they are passed being updated as a side effect of this call.
 */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask))
{
	int i, this_cpu = get_cpu();

	if (test_and_set_bit(0, &backtrace_flag)) {
		/*
		 * If there is already a trigger_all_cpu_backtrace() in
		 * progress (backtrace_flag == 1), don't output a duplicate
		 * CPU dump.
		 */
		put_cpu();
		return;
	}

	cpumask_copy(to_cpumask(backtrace_mask), mask);
	if (exclude_self)
		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));

	/*
	 * Don't try to send an NMI to this cpu; it may work on some
	 * architectures, but on others it may not, and we'll get
	 * information at least as useful just by doing a dump_stack() here.
	 * Note that nmi_cpu_backtrace(NULL) will clear the cpu bit.
	 */
	if (cpumask_test_cpu(this_cpu, to_cpumask(backtrace_mask)))
		nmi_cpu_backtrace(NULL);

	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
		pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",
			this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
		raise(to_cpumask(backtrace_mask));
	}

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
		touch_softlockup_watchdog();
	}

	/*
	 * Force flush any remote buffers that might be stuck in IRQ context
	 * and therefore could not run their irq_work.
	 */
	printk_nmi_flush();

	clear_bit_unlock(0, &backtrace_flag);
	put_cpu();
}
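
/*
 * Illustrative sketch (not compiled): this is roughly how an
 * architecture wires itself up to the helper above. The IPI primitive
 * arch_send_backtrace_ipi() is a hypothetical placeholder; each
 * architecture supplies its own way of raising an NMI-like interrupt
 * on the CPUs in the mask, then defines arch_trigger_cpumask_backtrace
 * (normally in an arch header) so that generic callers such as
 * trigger_all_cpu_backtrace() reach this file.
 */
#if 0
static void arch_raise_backtrace(cpumask_t *mask)
{
	arch_send_backtrace_ipi(mask);	/* hypothetical arch primitive */
}

#define arch_trigger_cpumask_backtrace(mask, exclude_self) \
	nmi_trigger_cpumask_backtrace(mask, exclude_self, \
				      arch_raise_backtrace)
#endif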

bool nmi_cpu_backtrace(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		if (regs && cpu_in_idle(instruction_pointer(regs))) {
			pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
				cpu, instruction_pointer(regs));
		} else {
			pr_warn("NMI backtrace for cpu %d\n", cpu);
			/*
			 * Two reasons for not calling show_regs() here:
			 * 1. Too many log lines (roughly 100 per second)
			 *    would be produced, pushing the stack traces
			 *    we actually want out of the log.
			 * 2. It can lead to a potential external abort on
			 *    non-linefetch.
			 */
#ifndef CONFIG_AMLOGIC_MODIFY
			if (regs)
				show_regs(regs);
			else
#endif
				dump_stack();
		}
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return true;
	}

	return false;
}
NOKPROBE_SYMBOL(nmi_cpu_backtrace);
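
/*
 * Illustrative sketch (not compiled): on the receiving side, an
 * architecture's NMI handler calls nmi_cpu_backtrace() with the
 * interrupted register state and uses the return value to decide
 * whether the NMI was a backtrace request for this CPU. On x86,
 * for instance, the registered handler looks roughly like this:
 */
#if 0
static int nmi_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
	if (nmi_cpu_backtrace(regs))
		return NMI_HANDLED;

	return NMI_DONE;
}
#endif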
#endif