GNU Linux-libre 4.14.266-gnu1
kernel/debug/debug_core.c
1 /*
2  * Kernel Debug Core
3  *
4  * Maintainer: Jason Wessel <jason.wessel@windriver.com>
5  *
6  * Copyright (C) 2000-2001 VERITAS Software Corporation.
7  * Copyright (C) 2002-2004 Timesys Corporation
8  * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
9  * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
10  * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
11  * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
12  * Copyright (C) 2005-2009 Wind River Systems, Inc.
13  * Copyright (C) 2007 MontaVista Software, Inc.
14  * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
15  *
16  * Contributors at various stages not listed above:
17  *  Jason Wessel ( jason.wessel@windriver.com )
18  *  George Anzinger <george@mvista.com>
19  *  Anurekh Saxena (anurekh.saxena@timesys.com)
20  *  Lake Stevens Instrument Division (Glenn Engel)
21  *  Jim Kingdon, Cygnus Support.
22  *
23  * Original KGDB stub: David Grothe <dave@gcom.com>,
24  * Tigran Aivazian <tigran@sco.com>
25  *
26  * This file is licensed under the terms of the GNU General Public License
27  * version 2. This program is licensed "as is" without any warranty of any
28  * kind, whether express or implied.
29  */
30
31 #define pr_fmt(fmt) "KGDB: " fmt
32
33 #include <linux/pid_namespace.h>
34 #include <linux/clocksource.h>
35 #include <linux/serial_core.h>
36 #include <linux/interrupt.h>
37 #include <linux/spinlock.h>
38 #include <linux/console.h>
39 #include <linux/threads.h>
40 #include <linux/uaccess.h>
41 #include <linux/kernel.h>
42 #include <linux/module.h>
43 #include <linux/ptrace.h>
44 #include <linux/string.h>
45 #include <linux/delay.h>
46 #include <linux/sched.h>
47 #include <linux/sysrq.h>
48 #include <linux/reboot.h>
49 #include <linux/init.h>
50 #include <linux/kgdb.h>
51 #include <linux/kdb.h>
52 #include <linux/nmi.h>
53 #include <linux/pid.h>
54 #include <linux/smp.h>
55 #include <linux/mm.h>
56 #include <linux/vmacache.h>
57 #include <linux/rcupdate.h>
58
59 #include <asm/cacheflush.h>
60 #include <asm/byteorder.h>
61 #include <linux/atomic.h>
62
63 #include "debug_core.h"
64
65 static int kgdb_break_asap;
66
67 struct debuggerinfo_struct kgdb_info[NR_CPUS];
68
69 /**
70  * kgdb_connected - Is a host GDB connected to us?
71  */
72 int                             kgdb_connected;
73 EXPORT_SYMBOL_GPL(kgdb_connected);
74
75 /* All the KGDB handlers are installed */
76 int                     kgdb_io_module_registered;
77
78 /* Guard for recursive entry */
79 static int                      exception_level;
80
81 struct kgdb_io          *dbg_io_ops;
82 static DEFINE_SPINLOCK(kgdb_registration_lock);
83
84 /* Action for the reboot notifier, a global so kdb can change it */
85 static int kgdbreboot;
86 /* kgdb console driver is loaded */
87 static int kgdb_con_registered;
88 /* determine if kgdb console output should be used */
89 static int kgdb_use_con;
90 /* Flag for alternate operations for early debugging */
91 bool dbg_is_early = true;
92 /* Next cpu to become the master debug core */
93 int dbg_switch_cpu;
94
95 /* Use kdb or gdbserver mode */
96 int dbg_kdb_mode = 1;
97
98 module_param(kgdb_use_con, int, 0644);
99 module_param(kgdbreboot, int, 0644);
100
101 /*
102  * Holds information about breakpoints in the kernel. These breakpoints are
103  * added and removed by gdb.
104  */
105 static struct kgdb_bkpt         kgdb_break[KGDB_MAX_BREAKPOINTS] = {
106         [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
107 };
108
109 /*
110  * The CPU# of the active CPU, or -1 if none:
111  */
112 atomic_t                        kgdb_active = ATOMIC_INIT(-1);
113 EXPORT_SYMBOL_GPL(kgdb_active);
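/*
 * dbg_master_lock serializes which CPU owns the debug session, while
 * dbg_slave_lock is held by the master to keep the other CPUs spinning
 * in the debugger until the session is over:
 */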
114 static DEFINE_RAW_SPINLOCK(dbg_master_lock);
115 static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
116
117 /*
118  * We use NR_CPUS, not PERCPU, in case kgdb is used to debug early
119  * bootup code (which might not have percpu set up yet):
120  */
121 static atomic_t                 masters_in_kgdb;
122 static atomic_t                 slaves_in_kgdb;
123 static atomic_t                 kgdb_break_tasklet_var;
124 atomic_t                        kgdb_setting_breakpoint;
125
126 struct task_struct              *kgdb_usethread;
127 struct task_struct              *kgdb_contthread;
128
129 int                             kgdb_single_step;
130 static pid_t                    kgdb_sstep_pid;
131
132 /* To keep track of the CPU which is doing the single stepping */
133 atomic_t                        kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
134
135 /*
136  * If the roundup (the collection of all other CPUs into the
137  * debugger) is itself causing problems [this should be extremely
138  * rare], use the nokgdbroundup option to skip it. In that case the
139  * other CPUs might interfere with your debugging context, so use
140  * this with care:
141  */
142 static int kgdb_do_roundup = 1;
143
144 static int __init opt_nokgdbroundup(char *str)
145 {
146         kgdb_do_roundup = 0;
147
148         return 0;
149 }
150
151 early_param("nokgdbroundup", opt_nokgdbroundup);
152
153 /*
154  * Finally, some KGDB code :-)
155  */
156
157 /*
158  * Weak aliases for breakpoint management,
159  * can be overridden by architectures when needed:
160  */
161 int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
162 {
163         int err;
164
165         err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
166                                 BREAK_INSTR_SIZE);
167         if (err)
168                 return err;
169         err = probe_kernel_write((char *)bpt->bpt_addr,
170                                  arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
171         return err;
172 }
173
174 int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
175 {
176         return probe_kernel_write((char *)bpt->bpt_addr,
177                                   (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
178 }
179
180 int __weak kgdb_validate_break_address(unsigned long addr)
181 {
182         struct kgdb_bkpt tmp;
183         int err;
184         /* Validate setting the breakpoint and then removing it.  If the
185          * remove fails, the kernel needs to emit a critical error message
186          * because we are in deep trouble, not being able to put things
187          * back the way we found them.
188          */
189         tmp.bpt_addr = addr;
190         err = kgdb_arch_set_breakpoint(&tmp);
191         if (err)
192                 return err;
193         err = kgdb_arch_remove_breakpoint(&tmp);
194         if (err)
195                 pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
196                        addr);
197         return err;
198 }
199
200 unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
201 {
202         return instruction_pointer(regs);
203 }
204
205 int __weak kgdb_arch_init(void)
206 {
207         return 0;
208 }
209
210 int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
211 {
212         return 0;
213 }
214
215 /*
216  * Some architectures need cache flushes when we set/clear a
217  * breakpoint:
218  */
219 static void kgdb_flush_swbreak_addr(unsigned long addr)
220 {
221         if (!CACHE_FLUSH_IS_SAFE)
222                 return;
223
224         if (current->mm) {
225                 int i;
226
227                 for (i = 0; i < VMACACHE_SIZE; i++) {
228                         if (!current->vmacache.vmas[i])
229                                 continue;
230                         flush_cache_range(current->vmacache.vmas[i],
231                                           addr, addr + BREAK_INSTR_SIZE);
232                 }
233         }
234
235         /* Force flush instruction cache if it was outside the mm */
236         flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
237 }
238
239 /*
240  * SW breakpoint management:
241  */
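/*
 * Write the architecture breakpoint instruction for every breakpoint in
 * BP_SET state and mark it BP_ACTIVE, flushing caches as needed.  The
 * last error encountered (if any) is returned.
 */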
242 int dbg_activate_sw_breakpoints(void)
243 {
244         int error;
245         int ret = 0;
246         int i;
247
248         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
249                 if (kgdb_break[i].state != BP_SET)
250                         continue;
251
252                 error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
253                 if (error) {
254                         ret = error;
255                         pr_info("BP install failed: %lx\n",
256                                 kgdb_break[i].bpt_addr);
257                         continue;
258                 }
259
260                 kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
261                 kgdb_break[i].state = BP_ACTIVE;
262         }
263         return ret;
264 }
265
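/*
 * Record a software breakpoint at addr: reuse a BP_REMOVED slot for the
 * same address if one exists, otherwise claim a BP_UNDEFINED slot.
 * Returns -EEXIST if the breakpoint is already set and -E2BIG if the
 * breakpoint table is full.
 */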
266 int dbg_set_sw_break(unsigned long addr)
267 {
268         int err = kgdb_validate_break_address(addr);
269         int breakno = -1;
270         int i;
271
272         if (err)
273                 return err;
274
275         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
276                 if ((kgdb_break[i].state == BP_SET) &&
277                                         (kgdb_break[i].bpt_addr == addr))
278                         return -EEXIST;
279         }
280         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
281                 if (kgdb_break[i].state == BP_REMOVED &&
282                                         kgdb_break[i].bpt_addr == addr) {
283                         breakno = i;
284                         break;
285                 }
286         }
287
288         if (breakno == -1) {
289                 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
290                         if (kgdb_break[i].state == BP_UNDEFINED) {
291                                 breakno = i;
292                                 break;
293                         }
294                 }
295         }
296
297         if (breakno == -1)
298                 return -E2BIG;
299
300         kgdb_break[breakno].state = BP_SET;
301         kgdb_break[breakno].type = BP_BREAKPOINT;
302         kgdb_break[breakno].bpt_addr = addr;
303
304         return 0;
305 }
306
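/*
 * Restore the original instruction for every BP_ACTIVE breakpoint and
 * mark it BP_SET again so it can be re-armed later.
 */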
307 int dbg_deactivate_sw_breakpoints(void)
308 {
309         int error;
310         int ret = 0;
311         int i;
312
313         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
314                 if (kgdb_break[i].state != BP_ACTIVE)
315                         continue;
316                 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
317                 if (error) {
318                         pr_info("BP remove failed: %lx\n",
319                                 kgdb_break[i].bpt_addr);
320                         ret = error;
321                 }
322
323                 kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
324                 kgdb_break[i].state = BP_SET;
325         }
326         return ret;
327 }
328
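/*
 * Mark a BP_SET breakpoint at addr as BP_REMOVED; the instruction is
 * physically restored the next time breakpoints are deactivated.
 */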
329 int dbg_remove_sw_break(unsigned long addr)
330 {
331         int i;
332
333         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
334                 if ((kgdb_break[i].state == BP_SET) &&
335                                 (kgdb_break[i].bpt_addr == addr)) {
336                         kgdb_break[i].state = BP_REMOVED;
337                         return 0;
338                 }
339         }
340         return -ENOENT;
341 }
342
343 int kgdb_isremovedbreak(unsigned long addr)
344 {
345         int i;
346
347         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
348                 if ((kgdb_break[i].state == BP_REMOVED) &&
349                                         (kgdb_break[i].bpt_addr == addr))
350                         return 1;
351         }
352         return 0;
353 }
354
355 int dbg_remove_all_break(void)
356 {
357         int error;
358         int i;
359
360         /* Clear memory breakpoints. */
361         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
362                 if (kgdb_break[i].state != BP_ACTIVE)
363                         goto setundefined;
364                 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
365                 if (error)
366                         pr_err("breakpoint remove failed: %lx\n",
367                                kgdb_break[i].bpt_addr);
368 setundefined:
369                 kgdb_break[i].state = BP_UNDEFINED;
370         }
371
372         /* Clear hardware breakpoints. */
373         if (arch_kgdb_ops.remove_all_hw_break)
374                 arch_kgdb_ops.remove_all_hw_break();
375
376         return 0;
377 }
378
379 /*
380  * Return true if there is a valid kgdb I/O module.  Also, if no
381  * debugger is attached, a message can be printed to the console about
382  * waiting for the debugger to attach.
383  *
384  * The print_wait argument is only to be true when called from inside
385  * the core kgdb_handle_exception, because it will wait for the
386  * debugger to attach.
387  */
388 static int kgdb_io_ready(int print_wait)
389 {
390         if (!dbg_io_ops)
391                 return 0;
392         if (kgdb_connected)
393                 return 1;
394         if (atomic_read(&kgdb_setting_breakpoint))
395                 return 1;
396         if (print_wait) {
397 #ifdef CONFIG_KGDB_KDB
398                 if (!dbg_kdb_mode)
399                         pr_crit("waiting... or $3#33 for KDB\n");
400 #else
401                 pr_crit("Waiting for remote debugger\n");
402 #endif
403         }
404         return 1;
405 }
406
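/*
 * Detect recursive entry into the debugger on the CPU that already owns
 * kgdb_active.  Returns non-zero if the nested exception was handled here
 * and the caller should not enter the debugger again.
 */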
407 static int kgdb_reenter_check(struct kgdb_state *ks)
408 {
409         unsigned long addr;
410
411         if (atomic_read(&kgdb_active) != raw_smp_processor_id())
412                 return 0;
413
414         /* Panic on recursive debugger calls: */
415         exception_level++;
416         addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
417         dbg_deactivate_sw_breakpoints();
418
419         /*
420          * If the breakpoint was removed successfully at the place the
421          * exception occurred, try to recover and print a warning to the
422          * end user, because the user planted a breakpoint in a place
423          * that KGDB needs in order to function.
424          */
425         if (dbg_remove_sw_break(addr) == 0) {
426                 exception_level = 0;
427                 kgdb_skipexception(ks->ex_vector, ks->linux_regs);
428                 dbg_activate_sw_breakpoints();
429                 pr_crit("re-enter error: breakpoint removed %lx\n", addr);
430                 WARN_ON_ONCE(1);
431
432                 return 1;
433         }
434         dbg_remove_all_break();
435         kgdb_skipexception(ks->ex_vector, ks->linux_regs);
436
437         if (exception_level > 1) {
438                 dump_stack();
439                 kgdb_io_module_registered = false;
440                 panic("Recursive entry to debugger");
441         }
442
443         pr_crit("re-enter exception: ALL breakpoints killed\n");
444 #ifdef CONFIG_KGDB_KDB
445         /* Allow kdb to debug itself one level */
446         return 0;
447 #endif
448         dump_stack();
449         panic("Recursive entry to debugger");
450
451         return 1;
452 }
453
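/*
 * Reset the softlockup, clocksource and RCU stall watchdogs so that time
 * spent stopped in the debugger is not reported as a lockup.
 */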
454 static void dbg_touch_watchdogs(void)
455 {
456         touch_softlockup_watchdog_sync();
457         clocksource_touch_watchdog();
458         rcu_cpu_stall_reset();
459 }
460
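/*
 * kgdb_cpu_enter() is the common entry point for both the master CPU
 * (the one that took the exception) and the slave CPUs that are rounded
 * up.  Slaves spin until dbg_slave_lock is released; the master runs
 * kdb_stub() or gdb_serial_stub() until the debug session ends.
 */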
461 static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
462                 int exception_state)
463 {
464         unsigned long flags;
465         int sstep_tries = 100;
466         int error;
467         int cpu;
468         int trace_on = 0;
469         int online_cpus = num_online_cpus();
470         u64 time_left;
471
472         kgdb_info[ks->cpu].enter_kgdb++;
473         kgdb_info[ks->cpu].exception_state |= exception_state;
474
475         if (exception_state == DCPU_WANT_MASTER)
476                 atomic_inc(&masters_in_kgdb);
477         else
478                 atomic_inc(&slaves_in_kgdb);
479
480         if (arch_kgdb_ops.disable_hw_break)
481                 arch_kgdb_ops.disable_hw_break(regs);
482
483 acquirelock:
484         rcu_read_lock();
485         /*
486          * Interrupts will be restored by the 'trap return' code, except when
487          * single stepping.
488          */
489         local_irq_save(flags);
490
491         cpu = ks->cpu;
492         kgdb_info[cpu].debuggerinfo = regs;
493         kgdb_info[cpu].task = current;
494         kgdb_info[cpu].ret_state = 0;
495         kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
496
497         /* Make sure the above info reaches the primary CPU */
498         smp_mb();
499
500         if (exception_level == 1) {
501                 if (raw_spin_trylock(&dbg_master_lock))
502                         atomic_xchg(&kgdb_active, cpu);
503                 goto cpu_master_loop;
504         }
505
506         /*
507          * The CPU will loop if it is a slave, or until it becomes the
508          * kgdb master cpu and acquires the kgdb_active lock:
509          */
510         while (1) {
511 cpu_loop:
512                 if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
513                         kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
514                         goto cpu_master_loop;
515                 } else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
516                         if (raw_spin_trylock(&dbg_master_lock)) {
517                                 atomic_xchg(&kgdb_active, cpu);
518                                 break;
519                         }
520                 } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
521                         if (!raw_spin_is_locked(&dbg_slave_lock))
522                                 goto return_normal;
523                 } else {
524 return_normal:
525                         /* Return to normal operation by executing any
526                          * hw breakpoint fixup.
527                          */
528                         if (arch_kgdb_ops.correct_hw_break)
529                                 arch_kgdb_ops.correct_hw_break();
530                         if (trace_on)
531                                 tracing_on();
532                         kgdb_info[cpu].debuggerinfo = NULL;
533                         kgdb_info[cpu].task = NULL;
534                         kgdb_info[cpu].exception_state &=
535                                 ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
536                         kgdb_info[cpu].enter_kgdb--;
537                         smp_mb__before_atomic();
538                         atomic_dec(&slaves_in_kgdb);
539                         dbg_touch_watchdogs();
540                         local_irq_restore(flags);
541                         rcu_read_unlock();
542                         return 0;
543                 }
544                 cpu_relax();
545         }
546
547         /*
548          * For single stepping, try to only enter on the processor
549          * that was single stepping.  To guard against a deadlock, the
550          * kernel will only try for the value of sstep_tries before
551          * giving up and continuing on.
552          */
553         if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
554             (kgdb_info[cpu].task &&
555              kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
556                 atomic_set(&kgdb_active, -1);
557                 raw_spin_unlock(&dbg_master_lock);
558                 dbg_touch_watchdogs();
559                 local_irq_restore(flags);
560                 rcu_read_unlock();
561
562                 goto acquirelock;
563         }
564
565         if (!kgdb_io_ready(1)) {
566                 kgdb_info[cpu].ret_state = 1;
567                 goto kgdb_restore; /* No I/O connection, resume the system */
568         }
569
570         /*
571          * Don't enter if we have hit a removed breakpoint.
572          */
573         if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
574                 goto kgdb_restore;
575
576         /* Call the I/O driver's pre_exception routine */
577         if (dbg_io_ops->pre_exception)
578                 dbg_io_ops->pre_exception();
579
580         /*
581          * Get the passive CPU lock which will hold all the non-primary
582          * CPUs in a spin state while the debugger is active
583          */
584         if (!kgdb_single_step)
585                 raw_spin_lock(&dbg_slave_lock);
586
587 #ifdef CONFIG_SMP
588         /* If send_ready set, slaves are already waiting */
589         if (ks->send_ready)
590                 atomic_set(ks->send_ready, 1);
591
592         /* Signal the other CPUs to enter kgdb_wait() */
593         else if ((!kgdb_single_step) && kgdb_do_roundup)
594                 kgdb_roundup_cpus(flags);
595 #endif
596
597         /*
598          * Wait for the other CPUs to be notified and be waiting for us:
599          */
600         time_left = MSEC_PER_SEC;
601         while (kgdb_do_roundup && --time_left &&
602                (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
603                    online_cpus)
604                 udelay(1000);
605         if (!time_left)
606                 pr_crit("Timed out waiting for secondary CPUs.\n");
607
608         /*
609          * At this point the primary processor is completely
610          * in the debugger and all secondary CPUs are quiescent
611          */
612         dbg_deactivate_sw_breakpoints();
613         kgdb_single_step = 0;
614         kgdb_contthread = current;
615         exception_level = 0;
616         trace_on = tracing_is_on();
617         if (trace_on)
618                 tracing_off();
619
620         while (1) {
621 cpu_master_loop:
622                 if (dbg_kdb_mode) {
623                         kgdb_connected = 1;
624                         error = kdb_stub(ks);
625                         if (error == -1)
626                                 continue;
627                         kgdb_connected = 0;
628                 } else {
629                         error = gdb_serial_stub(ks);
630                 }
631
632                 if (error == DBG_PASS_EVENT) {
633                         dbg_kdb_mode = !dbg_kdb_mode;
634                 } else if (error == DBG_SWITCH_CPU_EVENT) {
635                         kgdb_info[dbg_switch_cpu].exception_state |=
636                                 DCPU_NEXT_MASTER;
637                         goto cpu_loop;
638                 } else {
639                         kgdb_info[cpu].ret_state = error;
640                         break;
641                 }
642         }
643
644         /* Call the I/O driver's post_exception routine */
645         if (dbg_io_ops->post_exception)
646                 dbg_io_ops->post_exception();
647
648         if (!kgdb_single_step) {
649                 raw_spin_unlock(&dbg_slave_lock);
650                 /* Wait till all the CPUs have quit from the debugger. */
651                 while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
652                         cpu_relax();
653         }
654
655 kgdb_restore:
656         if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
657                 int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
658                 if (kgdb_info[sstep_cpu].task)
659                         kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
660                 else
661                         kgdb_sstep_pid = 0;
662         }
663         if (arch_kgdb_ops.correct_hw_break)
664                 arch_kgdb_ops.correct_hw_break();
665         if (trace_on)
666                 tracing_on();
667
668         kgdb_info[cpu].debuggerinfo = NULL;
669         kgdb_info[cpu].task = NULL;
670         kgdb_info[cpu].exception_state &=
671                 ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
672         kgdb_info[cpu].enter_kgdb--;
673         smp_mb__before_atomic();
674         atomic_dec(&masters_in_kgdb);
675         /* Free kgdb_active */
676         atomic_set(&kgdb_active, -1);
677         raw_spin_unlock(&dbg_master_lock);
678         dbg_touch_watchdogs();
679         local_irq_restore(flags);
680         rcu_read_unlock();
681
682         return kgdb_info[cpu].ret_state;
683 }
684
685 /*
686  * kgdb_handle_exception() - main entry point from a kernel exception
687  *
688  * Locking hierarchy:
689  *      interface locks, if any (begin_session)
690  *      kgdb lock (kgdb_active)
691  */
692 int
693 kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
694 {
695         struct kgdb_state kgdb_var;
696         struct kgdb_state *ks = &kgdb_var;
697         int ret = 0;
698
699         if (arch_kgdb_ops.enable_nmi)
700                 arch_kgdb_ops.enable_nmi(0);
701         /*
702          * Avoid entering the debugger if we were triggered due to an oops
703          * but panic_timeout indicates the system should automatically
704          * reboot on panic. We don't want to get stuck waiting for input
705          * on such systems, especially if it's "just" an oops.
706          */
707         if (signo != SIGTRAP && panic_timeout)
708                 return 1;
709
710         memset(ks, 0, sizeof(struct kgdb_state));
711         ks->cpu                 = raw_smp_processor_id();
712         ks->ex_vector           = evector;
713         ks->signo               = signo;
714         ks->err_code            = ecode;
715         ks->linux_regs          = regs;
716
717         if (kgdb_reenter_check(ks))
718                 goto out; /* Ouch, double exception ! */
719         if (kgdb_info[ks->cpu].enter_kgdb != 0)
720                 goto out;
721
722         ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
723 out:
724         if (arch_kgdb_ops.enable_nmi)
725                 arch_kgdb_ops.enable_nmi(1);
726         return ret;
727 }
728
729 /*
730  * GDB places a breakpoint at this function to learn about dynamically
731  * loaded objects. It's not defined static so that only one instance with this
732  * name exists in the kernel.
733  */
734
735 static int module_event(struct notifier_block *self, unsigned long val,
736         void *data)
737 {
738         return 0;
739 }
740
741 static struct notifier_block dbg_module_load_nb = {
742         .notifier_call  = module_event,
743 };
744
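/*
 * Called from the architecture roundup code when this CPU is interrupted
 * for a roundup; enter the debugger as a slave only if a master CPU is
 * already holding dbg_master_lock.  Returns 0 if kgdb was entered.
 */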
745 int kgdb_nmicallback(int cpu, void *regs)
746 {
747 #ifdef CONFIG_SMP
748         struct kgdb_state kgdb_var;
749         struct kgdb_state *ks = &kgdb_var;
750
751         memset(ks, 0, sizeof(struct kgdb_state));
752         ks->cpu                 = cpu;
753         ks->linux_regs          = regs;
754
755         if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
756                         raw_spin_is_locked(&dbg_master_lock)) {
757                 kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
758                 return 0;
759         }
760 #endif
761         return 1;
762 }
763
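/*
 * Allow an I/O driver to pull this CPU directly into the debugger as the
 * master, reporting readiness through *send_ready instead of performing
 * a CPU roundup.  Returns 0 if kgdb was entered.
 */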
764 int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
765                                                         atomic_t *send_ready)
766 {
767 #ifdef CONFIG_SMP
768         if (!kgdb_io_ready(0) || !send_ready)
769                 return 1;
770
771         if (kgdb_info[cpu].enter_kgdb == 0) {
772                 struct kgdb_state kgdb_var;
773                 struct kgdb_state *ks = &kgdb_var;
774
775                 memset(ks, 0, sizeof(struct kgdb_state));
776                 ks->cpu                 = cpu;
777                 ks->ex_vector           = trapnr;
778                 ks->signo               = SIGTRAP;
779                 ks->err_code            = err_code;
780                 ks->linux_regs          = regs;
781                 ks->send_ready          = send_ready;
782                 kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
783                 return 0;
784         }
785 #endif
786         return 1;
787 }
788
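/*
 * Console write hook that mirrors kernel console output to the attached
 * gdb (via gdbstub message packets) while connected and not currently
 * inside the debugger.
 */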
789 static void kgdb_console_write(struct console *co, const char *s,
790    unsigned count)
791 {
792         unsigned long flags;
793
794         /* If we're debugging, or KGDB has not connected, don't try
795          * to print. */
796         if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
797                 return;
798
799         local_irq_save(flags);
800         gdbstub_msg_write(s, count);
801         local_irq_restore(flags);
802 }
803
804 static struct console kgdbcons = {
805         .name           = "kgdb",
806         .write          = kgdb_console_write,
807         .flags          = CON_PRINTBUFFER | CON_ENABLED,
808         .index          = -1,
809 };
810
811 static int __init opt_kgdb_con(char *str)
812 {
813         kgdb_use_con = 1;
814
815         if (kgdb_io_module_registered && !kgdb_con_registered) {
816                 register_console(&kgdbcons);
817                 kgdb_con_registered = 1;
818         }
819
820         return 0;
821 }
822
823 early_param("kgdbcon", opt_kgdb_con);
824
825 #ifdef CONFIG_MAGIC_SYSRQ
826 static void sysrq_handle_dbg(int key)
827 {
828         if (!dbg_io_ops) {
829                 pr_crit("ERROR: No KGDB I/O module available\n");
830                 return;
831         }
832         if (!kgdb_connected) {
833 #ifdef CONFIG_KGDB_KDB
834                 if (!dbg_kdb_mode)
835                         pr_crit("KGDB or $3#33 for KDB\n");
836 #else
837                 pr_crit("Entering KGDB\n");
838 #endif
839         }
840
841         kgdb_breakpoint();
842 }
843
844 static struct sysrq_key_op sysrq_dbg_op = {
845         .handler        = sysrq_handle_dbg,
846         .help_msg       = "debug(g)",
847         .action_msg     = "DEBUG",
848 };
849 #endif
850
851 static int kgdb_panic_event(struct notifier_block *self,
852                             unsigned long val,
853                             void *data)
854 {
855         /*
856          * Avoid entering the debugger on panic when panic_timeout
857          * indicates the system should automatically reboot on panic.
858          * We don't want to get stuck waiting for input from the user
859          * in that case.
860          */
861         if (panic_timeout)
862                 return NOTIFY_DONE;
863
864         if (dbg_kdb_mode)
865                 kdb_printf("PANIC: %s\n", (char *)data);
866         kgdb_breakpoint();
867         return NOTIFY_DONE;
868 }
869
870 static struct notifier_block kgdb_panic_event_nb = {
871        .notifier_call   = kgdb_panic_event,
872        .priority        = INT_MAX,
873 };
874
875 void __weak kgdb_arch_late(void)
876 {
877 }
878
879 void __init dbg_late_init(void)
880 {
881         dbg_is_early = false;
882         if (kgdb_io_module_registered)
883                 kgdb_arch_late();
884         kdb_init(KDB_INIT_FULL);
885 }
886
887 static int
888 dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
889 {
890         /*
891          * Take the following action on reboot notify depending on value:
892          *    1 == Enter debugger
893          *    0 == [the default] detach the debug client
894          *   -1 == Do nothing... and use this until the board resets
895          */
896         switch (kgdbreboot) {
897         case 1:
898                 kgdb_breakpoint();
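                /* Fall through */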
899         case -1:
900                 goto done;
901         }
902         if (!dbg_kdb_mode)
903                 gdbstub_exit(code);
904 done:
905         return NOTIFY_DONE;
906 }
907
908 static struct notifier_block dbg_reboot_notifier = {
909         .notifier_call          = dbg_notify_reboot,
910         .next                   = NULL,
911         .priority               = INT_MAX,
912 };
913
914 static void kgdb_register_callbacks(void)
915 {
916         if (!kgdb_io_module_registered) {
917                 kgdb_io_module_registered = 1;
918                 kgdb_arch_init();
919                 if (!dbg_is_early)
920                         kgdb_arch_late();
921                 register_module_notifier(&dbg_module_load_nb);
922                 register_reboot_notifier(&dbg_reboot_notifier);
923                 atomic_notifier_chain_register(&panic_notifier_list,
924                                                &kgdb_panic_event_nb);
925 #ifdef CONFIG_MAGIC_SYSRQ
926                 register_sysrq_key('g', &sysrq_dbg_op);
927 #endif
928                 if (kgdb_use_con && !kgdb_con_registered) {
929                         register_console(&kgdbcons);
930                         kgdb_con_registered = 1;
931                 }
932         }
933 }
934
935 static void kgdb_unregister_callbacks(void)
936 {
937         /*
938          * When this routine is called KGDB should unregister from the
939          * panic handler and clean up, making sure it is not handling any
940          * break exceptions at the time.
941          */
942         if (kgdb_io_module_registered) {
943                 kgdb_io_module_registered = 0;
944                 unregister_reboot_notifier(&dbg_reboot_notifier);
945                 unregister_module_notifier(&dbg_module_load_nb);
946                 atomic_notifier_chain_unregister(&panic_notifier_list,
947                                                &kgdb_panic_event_nb);
948                 kgdb_arch_exit();
949 #ifdef CONFIG_MAGIC_SYSRQ
950                 unregister_sysrq_key('g', &sysrq_dbg_op);
951 #endif
952                 if (kgdb_con_registered) {
953                         unregister_console(&kgdbcons);
954                         kgdb_con_registered = 0;
955                 }
956         }
957 }
958
959 /*
960  * There are times a tasklet needs to be used instead of a compiled-in
961  * breakpoint so as to cause an exception outside a kgdb I/O module,
962  * such as is the case with kgdboe, where calling a breakpoint in the
963  * I/O driver itself would be fatal.
964  */
965 static void kgdb_tasklet_bpt(unsigned long ing)
966 {
967         kgdb_breakpoint();
968         atomic_set(&kgdb_break_tasklet_var, 0);
969 }
970
971 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
972
973 void kgdb_schedule_breakpoint(void)
974 {
975         if (atomic_read(&kgdb_break_tasklet_var) ||
976                 atomic_read(&kgdb_active) != -1 ||
977                 atomic_read(&kgdb_setting_breakpoint))
978                 return;
979         atomic_inc(&kgdb_break_tasklet_var);
980         tasklet_schedule(&kgdb_tasklet_breakpoint);
981 }
982 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
983
984 static void kgdb_initial_breakpoint(void)
985 {
986         kgdb_break_asap = 0;
987
988         pr_crit("Waiting for connection from remote gdb...\n");
989         kgdb_breakpoint();
990 }
991
992 /**
993  *      kgdb_register_io_module - register KGDB IO module
994  *      @new_dbg_io_ops: the io ops vector
995  *
996  *      Register it with the KGDB core.
997  */
998 int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
999 {
1000         int err;
1001
1002         spin_lock(&kgdb_registration_lock);
1003
1004         if (dbg_io_ops) {
1005                 spin_unlock(&kgdb_registration_lock);
1006
1007                 pr_err("Another I/O driver is already registered with KGDB\n");
1008                 return -EBUSY;
1009         }
1010
1011         if (new_dbg_io_ops->init) {
1012                 err = new_dbg_io_ops->init();
1013                 if (err) {
1014                         spin_unlock(&kgdb_registration_lock);
1015                         return err;
1016                 }
1017         }
1018
1019         dbg_io_ops = new_dbg_io_ops;
1020
1021         spin_unlock(&kgdb_registration_lock);
1022
1023         pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);
1024
1025         /* Arm KGDB now. */
1026         kgdb_register_callbacks();
1027
1028         if (kgdb_break_asap)
1029                 kgdb_initial_breakpoint();
1030
1031         return 0;
1032 }
1033 EXPORT_SYMBOL_GPL(kgdb_register_io_module);
1034
1035 /**
1036  *      kgdb_unregister_io_module - unregister KGDB IO module
1037  *      @old_dbg_io_ops: the io ops vector
1038  *
1039  *      Unregister it with the KGDB core.
1040  */
1041 void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
1042 {
1043         BUG_ON(kgdb_connected);
1044
1045         /*
1046          * KGDB is no longer able to communicate out, so
1047          * unregister our callbacks and reset state.
1048          */
1049         kgdb_unregister_callbacks();
1050
1051         spin_lock(&kgdb_registration_lock);
1052
1053         WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
1054         dbg_io_ops = NULL;
1055
1056         spin_unlock(&kgdb_registration_lock);
1057
1058         pr_info("Unregistered I/O driver %s, debugger disabled\n",
1059                 old_dbg_io_ops->name);
1060 }
1061 EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
1062
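/*
 * Poll a single character from the registered I/O driver.  Returns -1 if
 * nothing is pending; in kdb mode DEL (127) is mapped to backspace (8).
 */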
1063 int dbg_io_get_char(void)
1064 {
1065         int ret = dbg_io_ops->read_char();
1066         if (ret == NO_POLL_CHAR)
1067                 return -1;
1068         if (!dbg_kdb_mode)
1069                 return ret;
1070         if (ret == 127)
1071                 return 8;
1072         return ret;
1073 }
1074
1075 /**
1076  * kgdb_breakpoint - generate breakpoint exception
1077  *
1078  * This function will generate a breakpoint exception.  It is used at the
1079  * beginning of a program to sync up with a debugger and can be used
1080  * otherwise as a quick means to stop program execution and "break" into
1081  * the debugger.
1082  */
1083 noinline void kgdb_breakpoint(void)
1084 {
1085         atomic_inc(&kgdb_setting_breakpoint);
1086         wmb(); /* Sync point before breakpoint */
1087         arch_kgdb_breakpoint();
1088         wmb(); /* Sync point after breakpoint */
1089         atomic_dec(&kgdb_setting_breakpoint);
1090 }
1091 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
1092
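/*
 * The "kgdbwait" boot parameter makes the kernel stop and wait for a
 * debugger as soon as a kgdb I/O driver has been registered.
 */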
1093 static int __init opt_kgdb_wait(char *str)
1094 {
1095         kgdb_break_asap = 1;
1096
1097         kdb_init(KDB_INIT_EARLY);
1098         if (kgdb_io_module_registered)
1099                 kgdb_initial_breakpoint();
1100
1101         return 0;
1102 }
1103
1104 early_param("kgdbwait", opt_kgdb_wait);