/*
 * Kernel Debug Core
 *
 * Maintainer: Jason Wessel <[email protected]>
 *
 * Copyright (C) 2000-2001 VERITAS Software Corporation.
 * Copyright (C) 2002-2004 Timesys Corporation
 * Copyright (C) 2003-2004 Amit S. Kale <[email protected]>
 * Copyright (C) 2004 Pavel Machek <[email protected]>
 * Copyright (C) 2004-2006 Tom Rini <[email protected]>
 * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2005-2009 Wind River Systems, Inc.
 * Copyright (C) 2007 MontaVista Software, Inc.
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <[email protected]>
 *
 * Contributors at various stages not listed above:
 *  Jason Wessel ( [email protected] )
 *  George Anzinger <[email protected]>
 *  Anurekh Saxena ([email protected])
 *  Lake Stevens Instrument Division (Glenn Engel)
 *  Jim Kingdon, Cygnus Support.
 *
 * Original KGDB stub: David Grothe <[email protected]>,
 * Tigran Aivazian <[email protected]>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#define pr_fmt(fmt) "KGDB: " fmt

#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/serial_core.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/console.h>
#include <linux/threads.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/nmi.h>
#include <linux/pid.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <linux/irq.h>

#include <asm/cacheflush.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "debug_core.h"

static int kgdb_break_asap;

struct debuggerinfo_struct kgdb_info[NR_CPUS];

/* kgdb_connected - Is a host GDB connected to us? */
int				kgdb_connected;
EXPORT_SYMBOL_GPL(kgdb_connected);

/* All the KGDB handlers are installed */
int			kgdb_io_module_registered;

/* Guard for recursive entry */
static int			exception_level;

struct kgdb_io		*dbg_io_ops;
static DEFINE_SPINLOCK(kgdb_registration_lock);

/* Action for the reboot notifier, a global so kdb can change it */
static int kgdbreboot;
/* kgdb console driver is loaded */
static int kgdb_con_registered;
/* determine if kgdb console output should be used */
static int kgdb_use_con;
/* Flag for alternate operations for early debugging */
bool dbg_is_early = true;
/* Next cpu to become the master debug core */
int dbg_switch_cpu;

/* Use kdb or gdbserver mode */
int dbg_kdb_mode = 1;

module_param(kgdb_use_con, int, 0644);
module_param(kgdbreboot, int, 0644);
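
/*
 * Both parameters above are also adjustable at runtime through sysfs,
 * e.g. "echo -1 > /sys/module/debug_core/parameters/kgdbreboot" to make
 * the reboot notifier below do nothing (the path follows the standard
 * module_param convention for built-in code).
 */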

/*
 * Holds information about breakpoints in a kernel. These breakpoints are
 * added and removed by gdb.
 */
static struct kgdb_bkpt		kgdb_break[KGDB_MAX_BREAKPOINTS] = {
	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
};
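
/*
 * Each slot in kgdb_break[] moves through a small state machine as the
 * helpers further down operate on it:
 *
 *	BP_UNDEFINED -> BP_SET		dbg_set_sw_break()
 *	BP_SET       -> BP_ACTIVE	dbg_activate_sw_breakpoints()
 *	BP_ACTIVE    -> BP_SET		dbg_deactivate_sw_breakpoints()
 *	BP_SET       -> BP_REMOVED	dbg_remove_sw_break()
 *	any          -> BP_UNDEFINED	dbg_remove_all_break()
 *
 * Only BP_ACTIVE slots have the break instruction actually written into
 * kernel text; BP_SET means the breakpoint is armed but the original
 * instruction is currently in place.
 */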

/*
 * The CPU# of the active CPU, or -1 if none:
 */
atomic_t			kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);
static DEFINE_RAW_SPINLOCK(dbg_master_lock);
static DEFINE_RAW_SPINLOCK(dbg_slave_lock);

/*
 * We use NR_CPUS not PERCPU, in case kgdb is used to debug early
 * bootup code (which might not have percpu set up yet):
 */
static atomic_t			masters_in_kgdb;
static atomic_t			slaves_in_kgdb;
static atomic_t			kgdb_break_tasklet_var;
atomic_t			kgdb_setting_breakpoint;

struct task_struct		*kgdb_usethread;
struct task_struct		*kgdb_contthread;

int				kgdb_single_step;
static pid_t			kgdb_sstep_pid;

/* To keep track of the CPU which is doing the single stepping */
atomic_t			kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);

/*
 * If you are debugging a problem where roundup (the collection of
 * all other CPUs) is a problem [this should be extremely rare],
 * then use the nokgdbroundup option to avoid roundup. In that case
 * the other CPUs might interfere with your debugging context, so
 * use this with care:
 */
static int kgdb_do_roundup = 1;

static int __init opt_nokgdbroundup(char *str)
{
	kgdb_do_roundup = 0;

	return 0;
}

early_param("nokgdbroundup", opt_nokgdbroundup);
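
/*
 * Example: booting with "nokgdbroundup" on the command line makes only
 * the CPU that hit the exception stop in the debugger; the other CPUs
 * keep running and may interfere with the debug session, per the
 * warning above.
 */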

/*
 * Finally, some KGDB code :-)
 */

/*
 * Weak aliases for breakpoint management,
 * can be overridden by architectures when needed:
 */
int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;

	err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;
	err = copy_to_kernel_nofault((char *)bpt->bpt_addr,
				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
	return err;
}
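
/*
 * A rough sketch of an architecture override, for a port whose break
 * instruction must be chosen per-address (arch_pick_bp_insn() is a
 * hypothetical helper, purely for illustration):
 *
 *	int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 *	{
 *		u32 insn = arch_pick_bp_insn(bpt->bpt_addr);
 *		int err;
 *
 *		err = copy_from_kernel_nofault(bpt->saved_instr,
 *					       (char *)bpt->bpt_addr,
 *					       BREAK_INSTR_SIZE);
 *		if (err)
 *			return err;
 *		return copy_to_kernel_nofault((char *)bpt->bpt_addr, &insn,
 *					      BREAK_INSTR_SIZE);
 *	}
 */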

int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	return copy_to_kernel_nofault((char *)bpt->bpt_addr,
				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}

int __weak kgdb_validate_break_address(unsigned long addr)
{
	struct kgdb_bkpt tmp;
	int err;

	if (kgdb_within_blocklist(addr))
		return -EINVAL;

	/* Validate setting the breakpoint and then removing it.  If the
	 * remove fails, the kernel needs to emit a bad message because we
	 * are in deep trouble, not being able to put things back the way
	 * we found them.
	 */
	tmp.bpt_addr = addr;
	err = kgdb_arch_set_breakpoint(&tmp);
	if (err)
		return err;
	err = kgdb_arch_remove_breakpoint(&tmp);
	if (err)
		pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
		       addr);
	return err;
}

unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

int __weak kgdb_arch_init(void)
{
	return 0;
}

int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
{
	return 0;
}

#ifdef CONFIG_SMP

/*
 * Default (weak) implementation for kgdb_roundup_cpus
 */

static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd);

void __weak kgdb_call_nmi_hook(void *ignored)
{
	/*
	 * NOTE: get_irq_regs() is supposed to get the registers from
	 * before the IPI interrupt happened and so is supposed to
	 * show where the processor was.  In some situations it's
	 * possible we might be called without an IPI, so it might be
	 * safer to figure out how to make kgdb_breakpoint() work
	 * properly here.
	 */
	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}

void __weak kgdb_roundup_cpus(void)
{
	call_single_data_t *csd;
	int this_cpu = raw_smp_processor_id();
	int cpu;
	int ret;

	for_each_online_cpu(cpu) {
		/* No need to roundup ourselves */
		if (cpu == this_cpu)
			continue;

		csd = &per_cpu(kgdb_roundup_csd, cpu);

		/*
		 * If it didn't round up last time, don't try again
		 * since smp_call_function_single_async() will block.
		 *
		 * If rounding_up is false then we know that the
		 * previous call must have at least started and that
		 * means smp_call_function_single_async() won't block.
		 */
		if (kgdb_info[cpu].rounding_up)
			continue;
		kgdb_info[cpu].rounding_up = true;

		csd->func = kgdb_call_nmi_hook;
		ret = smp_call_function_single_async(cpu, csd);
		if (ret)
			kgdb_info[cpu].rounding_up = false;
	}
}

#endif
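
/*
 * Architectures with a true NMI can override kgdb_roundup_cpus() so that
 * even CPUs spinning with interrupts disabled get rounded up. A sketch
 * under that assumption (arch_send_debug_nmi_allbutself() is a
 * hypothetical helper; each real port has its own mechanism):
 *
 *	void kgdb_roundup_cpus(void)
 *	{
 *		arch_send_debug_nmi_allbutself();
 *	}
 *
 * The NMI handler on each target CPU is then expected to call
 * kgdb_nmicallback() with its registers, just as the default hook above
 * does.
 */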

/*
 * Some architectures need cache flushes when we set/clear a
 * breakpoint:
 */
static void kgdb_flush_swbreak_addr(unsigned long addr)
{
	if (!CACHE_FLUSH_IS_SAFE)
		return;

	/* Force flush instruction cache if it was outside the mm */
	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
}

/*
 * SW breakpoint management:
 */
int dbg_activate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_SET)
			continue;

		error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
		if (error) {
			ret = error;
			pr_info("BP install failed: %lx\n",
				kgdb_break[i].bpt_addr);
			continue;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_ACTIVE;
	}
	return ret;
}

int dbg_set_sw_break(unsigned long addr)
{
	int err = kgdb_validate_break_address(addr);
	int breakno = -1;
	int i;

	if (err)
		return err;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
					(kgdb_break[i].bpt_addr == addr))
			return -EEXIST;
	}
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state == BP_REMOVED &&
					kgdb_break[i].bpt_addr == addr) {
			breakno = i;
			break;
		}
	}

	if (breakno == -1) {
		for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
			if (kgdb_break[i].state == BP_UNDEFINED) {
				breakno = i;
				break;
			}
		}
	}

	if (breakno == -1)
		return -E2BIG;

	kgdb_break[breakno].state = BP_SET;
	kgdb_break[breakno].type = BP_BREAKPOINT;
	kgdb_break[breakno].bpt_addr = addr;

	return 0;
}

int dbg_deactivate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			continue;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error) {
			pr_info("BP remove failed: %lx\n",
				kgdb_break[i].bpt_addr);
			ret = error;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_SET;
	}
	return ret;
}

int dbg_remove_sw_break(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
				(kgdb_break[i].bpt_addr == addr)) {
			kgdb_break[i].state = BP_REMOVED;
			return 0;
		}
	}
	return -ENOENT;
}

int kgdb_isremovedbreak(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_REMOVED) &&
					(kgdb_break[i].bpt_addr == addr))
			return 1;
	}
	return 0;
}

int kgdb_has_hit_break(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state == BP_ACTIVE &&
		    kgdb_break[i].bpt_addr == addr)
			return 1;
	}
	return 0;
}

int dbg_remove_all_break(void)
{
	int error;
	int i;

	/* Clear memory breakpoints. */
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			goto setundefined;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error)
			pr_err("breakpoint remove failed: %lx\n",
			       kgdb_break[i].bpt_addr);
setundefined:
		kgdb_break[i].state = BP_UNDEFINED;
	}

	/* Clear hardware breakpoints. */
	if (arch_kgdb_ops.remove_all_hw_break)
		arch_kgdb_ops.remove_all_hw_break();

	return 0;
}

#ifdef CONFIG_KGDB_KDB
void kdb_dump_stack_on_cpu(int cpu)
{
	if (cpu == raw_smp_processor_id() || !IS_ENABLED(CONFIG_SMP)) {
		dump_stack();
		return;
	}

	if (!(kgdb_info[cpu].exception_state & DCPU_IS_SLAVE)) {
		kdb_printf("ERROR: Task on cpu %d didn't stop in the debugger\n",
			   cpu);
		return;
	}

	/*
	 * In general, architectures don't support dumping the stack of a
	 * "running" process that's not the current one.  From the point of
	 * view of Linux, kernel processes that are looping in the kgdb
	 * slave loop are still "running".  There's also no API (that
	 * actually works across all architectures) that can do a stack
	 * crawl based on registers passed as a parameter.
	 *
	 * Solve this conundrum by asking slave CPUs to do the backtrace
	 * themselves.
	 */
	kgdb_info[cpu].exception_state |= DCPU_WANT_BT;
	while (kgdb_info[cpu].exception_state & DCPU_WANT_BT)
		cpu_relax();
}
#endif

/*
 * Return true if there is a valid kgdb I/O module.  If no debugger is
 * attached, a message may also be printed to the console about waiting
 * for the debugger to attach.
 *
 * The print_wait argument should only be true when called from inside
 * the core kgdb_handle_exception(), because that path will wait for
 * the debugger to attach.
 */
static int kgdb_io_ready(int print_wait)
{
	if (!dbg_io_ops)
		return 0;
	if (kgdb_connected)
		return 1;
	if (atomic_read(&kgdb_setting_breakpoint))
		return 1;
	if (print_wait) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			pr_crit("waiting... or $3#33 for KDB\n");
#else
		pr_crit("Waiting for remote debugger\n");
#endif
	}
	return 1;
}

static int kgdb_reenter_check(struct kgdb_state *ks)
{
	unsigned long addr;

	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
		return 0;

	/* Panic on recursive debugger calls: */
	exception_level++;
	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	dbg_deactivate_sw_breakpoints();

	/*
	 * If the breakpoint was removed successfully at the place the
	 * exception occurred, try to recover and print a warning to the
	 * end user, because the user planted a breakpoint in a place
	 * that KGDB needs in order to function.
	 */
	if (dbg_remove_sw_break(addr) == 0) {
		exception_level = 0;
		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
		dbg_activate_sw_breakpoints();
		pr_crit("re-enter error: breakpoint removed %lx\n", addr);
		WARN_ON_ONCE(1);

		return 1;
	}
	dbg_remove_all_break();
	kgdb_skipexception(ks->ex_vector, ks->linux_regs);

	if (exception_level > 1) {
		dump_stack();
		kgdb_io_module_registered = false;
		panic("Recursive entry to debugger");
	}

	pr_crit("re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
	/* Allow kdb to debug itself one level */
	return 0;
#endif
	dump_stack();
	panic("Recursive entry to debugger");

	return 1;
}

static void dbg_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
}

static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
		int exception_state)
{
	unsigned long flags;
	int sstep_tries = 100;
	int error;
	int cpu;
	int trace_on = 0;
	int online_cpus = num_online_cpus();
	u64 time_left;

	kgdb_info[ks->cpu].enter_kgdb++;
	kgdb_info[ks->cpu].exception_state |= exception_state;

	if (exception_state == DCPU_WANT_MASTER)
		atomic_inc(&masters_in_kgdb);
	else
		atomic_inc(&slaves_in_kgdb);

	if (arch_kgdb_ops.disable_hw_break)
		arch_kgdb_ops.disable_hw_break(regs);

acquirelock:
	rcu_read_lock();
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;

	/* Make sure the above info reaches the primary CPU */
	smp_mb();

	if (exception_level == 1) {
		if (raw_spin_trylock(&dbg_master_lock))
			atomic_xchg(&kgdb_active, cpu);
		goto cpu_master_loop;
	}

	/*
	 * The CPU will loop if it is a slave, or until it can become the
	 * kgdb master CPU and acquire the kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			if (raw_spin_trylock(&dbg_master_lock)) {
				atomic_xchg(&kgdb_active, cpu);
				break;
			}
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_BT) {
			dump_stack();
			kgdb_info[cpu].exception_state &= ~DCPU_WANT_BT;
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			if (!raw_spin_is_locked(&dbg_slave_lock))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			kgdb_info[cpu].debuggerinfo = NULL;
			kgdb_info[cpu].task = NULL;
			kgdb_info[cpu].exception_state &=
				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
			kgdb_info[cpu].enter_kgdb--;
			smp_mb__before_atomic();
			atomic_dec(&slaves_in_kgdb);
			dbg_touch_watchdogs();
			local_irq_restore(flags);
			rcu_read_unlock();
			return 0;
		}
		cpu_relax();
	}

	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping.  To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		atomic_set(&kgdb_active, -1);
		raw_spin_unlock(&dbg_master_lock);
		dbg_touch_watchdogs();
		local_irq_restore(flags);
		rcu_read_unlock();

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	atomic_inc(&ignore_console_lock_warning);

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();

	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPUs in a spin state while the debugger is active
	 */
	if (!kgdb_single_step)
		raw_spin_lock(&dbg_slave_lock);

#ifdef CONFIG_SMP
	/* If send_ready set, slaves are already waiting */
	if (ks->send_ready)
		atomic_set(ks->send_ready, 1);

	/* Signal the other CPUs to enter kgdb_wait() */
	else if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus();
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 */
	time_left = MSEC_PER_SEC;
	while (kgdb_do_roundup && --time_left &&
	       (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
		   online_cpus)
		udelay(1000);
	if (!time_left)
		pr_crit("Timed out waiting for secondary CPUs.\n");

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			if (error == -1)
				continue;
			kgdb_connected = 0;
		} else {
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			kgdb_info[dbg_switch_cpu].exception_state |=
				DCPU_NEXT_MASTER;
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	atomic_dec(&ignore_console_lock_warning);

	if (!kgdb_single_step) {
		raw_spin_unlock(&dbg_slave_lock);
		/* Wait till all the CPUs have quit from the debugger. */
		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
			cpu_relax();
	}

kgdb_restore:
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	if (trace_on)
		tracing_on();

	kgdb_info[cpu].debuggerinfo = NULL;
	kgdb_info[cpu].task = NULL;
	kgdb_info[cpu].exception_state &=
		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
	kgdb_info[cpu].enter_kgdb--;
	smp_mb__before_atomic();
	atomic_dec(&masters_in_kgdb);
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	raw_spin_unlock(&dbg_master_lock);
	dbg_touch_watchdogs();
	local_irq_restore(flags);
	rcu_read_unlock();

	return kgdb_info[cpu].ret_state;
}

/*
 * kgdb_handle_exception() - main entry point from a kernel exception
 *
 * Locking hierarchy:
 *	interface locks, if any (begin_session)
 *	kgdb lock (kgdb_active)
 */
int
kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;
	int ret = 0;

	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(0);
	/*
	 * Avoid entering the debugger if we were triggered due to an oops
	 * but panic_timeout indicates the system should automatically
	 * reboot on panic. We don't want to get stuck waiting for input
	 * on such systems, especially if it's "just" an oops.
	 */
	if (signo != SIGTRAP && panic_timeout)
		return 1;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu			= raw_smp_processor_id();
	ks->ex_vector		= evector;
	ks->signo		= signo;
	ks->err_code		= ecode;
	ks->linux_regs		= regs;

	if (kgdb_reenter_check(ks))
		goto out; /* Ouch, double exception ! */
	if (kgdb_info[ks->cpu].enter_kgdb != 0)
		goto out;

	ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
out:
	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(1);
	return ret;
}
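
/*
 * A rough sketch of the call site an architecture provides; the handler
 * name and vector plumbing are illustrative only, since each port wires
 * this into its own trap or debug-exception path:
 *
 *	static int my_arch_bp_handler(int vector, struct pt_regs *regs)
 *	{
 *		if (user_mode(regs))
 *			return -EINVAL;
 *		return kgdb_handle_exception(vector, SIGTRAP, 0, regs);
 *	}
 */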

/*
 * GDB places a breakpoint at this function to learn about dynamically
 * loaded objects.
 */
static int module_event(struct notifier_block *self, unsigned long val,
	void *data)
{
	return 0;
}

static struct notifier_block dbg_module_load_nb = {
	.notifier_call	= module_event,
};

int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;

	kgdb_info[cpu].rounding_up = false;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu			= cpu;
	ks->linux_regs		= regs;

	if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
			raw_spin_is_locked(&dbg_master_lock)) {
		kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
		return 0;
	}
#endif
	return 1;
}

int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
							atomic_t *send_ready)
{
#ifdef CONFIG_SMP
	if (!kgdb_io_ready(0) || !send_ready)
		return 1;

	if (kgdb_info[cpu].enter_kgdb == 0) {
		struct kgdb_state kgdb_var;
		struct kgdb_state *ks = &kgdb_var;

		memset(ks, 0, sizeof(struct kgdb_state));
		ks->cpu			= cpu;
		ks->ex_vector		= trapnr;
		ks->signo		= SIGTRAP;
		ks->err_code		= err_code;
		ks->linux_regs		= regs;
		ks->send_ready		= send_ready;
		kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
		return 0;
	}
#endif
	return 1;
}

static void kgdb_console_write(struct console *co, const char *s,
   unsigned count)
{
	unsigned long flags;

	/* If we're debugging, or KGDB has not connected, don't try
	 * to print. */
	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
		return;

	local_irq_save(flags);
	gdbstub_msg_write(s, count);
	local_irq_restore(flags);
}

static struct console kgdbcons = {
	.name		= "kgdb",
	.write		= kgdb_console_write,
	.flags		= CON_PRINTBUFFER | CON_ENABLED,
	.index		= -1,
};

static int __init opt_kgdb_con(char *str)
{
	kgdb_use_con = 1;

	if (kgdb_io_module_registered && !kgdb_con_registered) {
		register_console(&kgdbcons);
		kgdb_con_registered = 1;
	}

	return 0;
}

early_param("kgdbcon", opt_kgdb_con);
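
/*
 * Example: booting with something like "kgdboc=ttyS0,115200 kgdbcon"
 * (assuming a kgdboc-capable serial port) registers the console above,
 * so printk output is mirrored to the attached gdb session through
 * gdbstub_msg_write() once a debugger is connected.
 */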

#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_dbg(int key)
{
	if (!dbg_io_ops) {
		pr_crit("ERROR: No KGDB I/O module available\n");
		return;
	}
	if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			pr_crit("KGDB or $3#33 for KDB\n");
#else
		pr_crit("Entering KGDB\n");
#endif
	}

	kgdb_breakpoint();
}

static const struct sysrq_key_op sysrq_dbg_op = {
	.handler	= sysrq_handle_dbg,
	.help_msg	= "debug(g)",
	.action_msg	= "DEBUG",
};
#endif

void kgdb_panic(const char *msg)
{
	if (!kgdb_io_module_registered)
		return;

	/*
	 * We don't want to get stuck waiting for input from user if
	 * "panic_timeout" indicates the system should automatically
	 * reboot on panic.
	 */
	if (panic_timeout)
		return;

	if (dbg_kdb_mode)
		kdb_printf("PANIC: %s\n", msg);

	kgdb_breakpoint();
}

static void kgdb_initial_breakpoint(void)
{
	kgdb_break_asap = 0;

	pr_crit("Waiting for connection from remote gdb...\n");
	kgdb_breakpoint();
}

void __weak kgdb_arch_late(void)
{
}

void __init dbg_late_init(void)
{
	dbg_is_early = false;
	if (kgdb_io_module_registered)
		kgdb_arch_late();
	kdb_init(KDB_INIT_FULL);

	if (kgdb_io_module_registered && kgdb_break_asap)
		kgdb_initial_breakpoint();
}

static int
dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
{
	/*
	 * Take the following action on reboot notify depending on value:
	 *    1 == Enter debugger
	 *    0 == [the default] detach debug client
	 *   -1 == Do nothing... and use this until the board resets
	 */
	switch (kgdbreboot) {
	case 1:
		kgdb_breakpoint();
		fallthrough;
	case -1:
		goto done;
	}
	if (!dbg_kdb_mode)
		gdbstub_exit(code);
done:
	return NOTIFY_DONE;
}

static struct notifier_block dbg_reboot_notifier = {
	.notifier_call		= dbg_notify_reboot,
	.next			= NULL,
	.priority		= INT_MAX,
};

static void kgdb_register_callbacks(void)
{
	if (!kgdb_io_module_registered) {
		kgdb_io_module_registered = 1;
		kgdb_arch_init();
		if (!dbg_is_early)
			kgdb_arch_late();
		register_module_notifier(&dbg_module_load_nb);
		register_reboot_notifier(&dbg_reboot_notifier);
#ifdef CONFIG_MAGIC_SYSRQ
		register_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_use_con && !kgdb_con_registered) {
			register_console(&kgdbcons);
			kgdb_con_registered = 1;
		}
	}
}

static void kgdb_unregister_callbacks(void)
{
	/*
	 * When this routine is called KGDB should unregister from
	 * handlers and clean up, making sure it is not handling any
	 * break exceptions at the time.
	 */
	if (kgdb_io_module_registered) {
		kgdb_io_module_registered = 0;
		unregister_reboot_notifier(&dbg_reboot_notifier);
		unregister_module_notifier(&dbg_module_load_nb);
		kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
		unregister_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_con_registered) {
			unregister_console(&kgdbcons);
			kgdb_con_registered = 0;
		}
	}
}

/*
 * There are times a tasklet needs to be used instead of a compiled-in
 * breakpoint so as to cause an exception outside a kgdb I/O module,
 * such as is the case with kgdboe, where calling a breakpoint in the
 * I/O driver itself would be fatal.
 */
static void kgdb_tasklet_bpt(unsigned long ing)
{
	kgdb_breakpoint();
	atomic_set(&kgdb_break_tasklet_var, 0);
}

static DECLARE_TASKLET_OLD(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt);

void kgdb_schedule_breakpoint(void)
{
	if (atomic_read(&kgdb_break_tasklet_var) ||
		atomic_read(&kgdb_active) != -1 ||
		atomic_read(&kgdb_setting_breakpoint))
		return;
	atomic_inc(&kgdb_break_tasklet_var);
	tasklet_schedule(&kgdb_tasklet_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);

/**
 *	kgdb_register_io_module - register KGDB IO module
 *	@new_dbg_io_ops: the io ops vector
 *
 *	Register it with the KGDB core.
 */
int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
{
	struct kgdb_io *old_dbg_io_ops;
	int err;

	spin_lock(&kgdb_registration_lock);

	old_dbg_io_ops = dbg_io_ops;
	if (old_dbg_io_ops) {
		if (!old_dbg_io_ops->deinit) {
			spin_unlock(&kgdb_registration_lock);

			pr_err("KGDB I/O driver %s can't replace %s.\n",
				new_dbg_io_ops->name, old_dbg_io_ops->name);
			return -EBUSY;
		}
		pr_info("Replacing I/O driver %s with %s\n",
			old_dbg_io_ops->name, new_dbg_io_ops->name);
	}

	if (new_dbg_io_ops->init) {
		err = new_dbg_io_ops->init();
		if (err) {
			spin_unlock(&kgdb_registration_lock);
			return err;
		}
	}

	dbg_io_ops = new_dbg_io_ops;

	spin_unlock(&kgdb_registration_lock);

	if (old_dbg_io_ops) {
		old_dbg_io_ops->deinit();
		return 0;
	}

	pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);

	/* Arm KGDB now. */
	kgdb_register_callbacks();

	if (kgdb_break_asap &&
	    (!dbg_is_early || IS_ENABLED(CONFIG_ARCH_HAS_EARLY_DEBUG)))
		kgdb_initial_breakpoint();

	return 0;
}
EXPORT_SYMBOL_GPL(kgdb_register_io_module);
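
/*
 * A minimal, hypothetical registration sketch (my_uart_get_char() and
 * my_uart_put_char() are illustrative placeholders, not a real API):
 *
 *	static struct kgdb_io my_dbg_io_ops = {
 *		.name		= "my_dbg_io",
 *		.read_char	= my_uart_get_char,
 *		.write_char	= my_uart_put_char,
 *	};
 *
 *	static int __init my_dbg_io_init(void)
 *	{
 *		return kgdb_register_io_module(&my_dbg_io_ops);
 *	}
 *	late_initcall(my_dbg_io_init);
 */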

/**
 *	kgdb_unregister_io_module - unregister KGDB IO module
 *	@old_dbg_io_ops: the io ops vector
 *
 *	Unregister it with the KGDB core.
 */
void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
{
	BUG_ON(kgdb_connected);

	/*
	 * KGDB is no longer able to communicate out, so
	 * unregister our callbacks and reset state.
	 */
	kgdb_unregister_callbacks();

	spin_lock(&kgdb_registration_lock);

	WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
	dbg_io_ops = NULL;

	spin_unlock(&kgdb_registration_lock);

	if (old_dbg_io_ops->deinit)
		old_dbg_io_ops->deinit();

	pr_info("Unregistered I/O driver %s, debugger disabled\n",
		old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);

int dbg_io_get_char(void)
{
	int ret = dbg_io_ops->read_char();
	if (ret == NO_POLL_CHAR)
		return -1;
	if (!dbg_kdb_mode)
		return ret;
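	/* Translate DEL (127), which some terminals send, into the
	 * backspace (8) that kdb expects:
	 */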
	if (ret == 127)
		return 8;
	return ret;
}

/**
 * kgdb_breakpoint - generate breakpoint exception
 *
 * This function will generate a breakpoint exception.  It is used at the
 * beginning of a program to sync up with a debugger and can be used
 * otherwise as a quick means to stop program execution and "break" into
 * the debugger.
 */
noinline void kgdb_breakpoint(void)
{
	atomic_inc(&kgdb_setting_breakpoint);
	wmb(); /* Sync point before breakpoint */
	arch_kgdb_breakpoint();
	wmb(); /* Sync point after breakpoint */
	atomic_dec(&kgdb_setting_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_breakpoint);
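
/*
 * Example: code that wants to stop in the debugger at a specific point
 * can call it directly (the condition is purely illustrative):
 *
 *	if (suspect_state)
 *		kgdb_breakpoint();
 */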

static int __init opt_kgdb_wait(char *str)
{
	kgdb_break_asap = 1;

	kdb_init(KDB_INIT_EARLY);
	if (kgdb_io_module_registered &&
	    IS_ENABLED(CONFIG_ARCH_HAS_EARLY_DEBUG))
		kgdb_initial_breakpoint();

	return 0;
}

early_param("kgdbwait", opt_kgdb_wait);
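
/*
 * Example: booting with "kgdbwait kgdboc=ttyS0,115200" (assuming the
 * kgdboc driver is built in) stops the kernel early in boot and waits
 * for a debugger to attach over ttyS0.
 */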
1224