/*
 * Kernel Debug Core
 *
 * Maintainer: Jason Wessel <jason.wessel@windriver.com>
 *
 * Copyright (C) 2000-2001 VERITAS Software Corporation.
 * Copyright (C) 2002-2004 Timesys Corporation
 * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
 * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
 * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2005-2009 Wind River Systems, Inc.
 * Copyright (C) 2007 MontaVista Software, Inc.
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Contributors at various stages not listed above:
 * Jason Wessel ( jason.wessel@windriver.com )
 * George Anzinger <george@mvista.com>
 * Anurekh Saxena (anurekh.saxena@timesys.com)
 * Lake Stevens Instrument Division (Glenn Engel)
 * Jim Kingdon, Cygnus Support.
 *
 * Original KGDB stub: David Grothe <dave@gcom.com>,
 * Tigran Aivazian <tigran@sco.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#define pr_fmt(fmt) "KGDB: " fmt

#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/serial_core.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/console.h>
#include <linux/threads.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/nmi.h>
#include <linux/pid.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/rcupdate.h>
#include <linux/irq.h>

#include <asm/cacheflush.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "debug_core.h"

static int kgdb_break_asap;

struct debuggerinfo_struct kgdb_info[NR_CPUS];

/* kgdb_connected - Is a host GDB connected to us? */
int kgdb_connected;
EXPORT_SYMBOL_GPL(kgdb_connected);

/* All the KGDB handlers are installed */
int kgdb_io_module_registered;

/* Guard for recursive entry */
static int exception_level;

struct kgdb_io *dbg_io_ops;
static DEFINE_SPINLOCK(kgdb_registration_lock);

/* Action for the reboot notifier, a global so kdb can change it */
static int kgdbreboot;
/* kgdb console driver is loaded */
static int kgdb_con_registered;
/* determine if kgdb console output should be used */
static int kgdb_use_con;
/* Flag for alternate operations for early debugging */
bool dbg_is_early = true;
/* Next cpu to become the master debug core */
int dbg_switch_cpu;

/* Use kdb or gdbserver mode */
int dbg_kdb_mode = 1;

module_param(kgdb_use_con, int, 0644);
module_param(kgdbreboot, int, 0644);

/*
 * Holds information about breakpoints in a kernel. These breakpoints are
 * added and removed by gdb.
 */
static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
};

/*
 * The CPU# of the active CPU, or -1 if none:
 */
atomic_t kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);
static DEFINE_RAW_SPINLOCK(dbg_master_lock);
static DEFINE_RAW_SPINLOCK(dbg_slave_lock);

/*
 * We use NR_CPUS not PERCPU, in case kgdb is used to debug early
 * bootup code (which might not have percpu set up yet):
 */
static atomic_t masters_in_kgdb;
static atomic_t slaves_in_kgdb;
static atomic_t kgdb_break_tasklet_var;
atomic_t kgdb_setting_breakpoint;

struct task_struct *kgdb_usethread;
struct task_struct *kgdb_contthread;

int kgdb_single_step;
static pid_t kgdb_sstep_pid;

/* to keep track of the CPU which is doing the single stepping */
atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);

/*
 * If you are debugging a problem where roundup (the collection of
 * all other CPUs) is a problem [this should be extremely rare],
 * then use the nokgdbroundup option to avoid roundup. In that case
 * the other CPUs might interfere with your debugging context, so
 * use this with care:
 */
static int kgdb_do_roundup = 1;

static int __init opt_nokgdbroundup(char *str)
{
	kgdb_do_roundup = 0;

	return 0;
}

early_param("nokgdbroundup", opt_nokgdbroundup);
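
/*
 * Booting with "nokgdbroundup" on the kernel command line therefore
 * leaves kgdb_do_roundup at zero, so kgdb_roundup_cpus() is never
 * called when the debugger is entered and the other CPUs keep running.
 */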

/*
 * Finally, some KGDB code :-)
 */

/*
 * Weak aliases for breakpoint management,
 * can be overridden by architectures when needed:
 */
int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;

	err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr,
				       BREAK_INSTR_SIZE);
	if (err)
		return err;
	err = copy_to_kernel_nofault((char *)bpt->bpt_addr,
				     arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
	return err;
}
NOKPROBE_SYMBOL(kgdb_arch_set_breakpoint);

int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	return copy_to_kernel_nofault((char *)bpt->bpt_addr,
				      (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}
NOKPROBE_SYMBOL(kgdb_arch_remove_breakpoint);

int __weak kgdb_validate_break_address(unsigned long addr)
{
	struct kgdb_bkpt tmp;
	int err;

	if (kgdb_within_blocklist(addr))
		return -EINVAL;

	/* Validate setting the breakpoint and then removing it.  If the
	 * remove fails, the kernel needs to emit a bad message because we
	 * are in deep trouble not being able to put things back the way we
	 * found them.
	 */
	tmp.bpt_addr = addr;
	err = kgdb_arch_set_breakpoint(&tmp);
	if (err)
		return err;
	err = kgdb_arch_remove_breakpoint(&tmp);
	if (err)
		pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
		       addr);
	return err;
}

unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	return instruction_pointer(regs);
}
NOKPROBE_SYMBOL(kgdb_arch_pc);

int __weak kgdb_arch_init(void)
{
	return 0;
}

int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
{
	return 0;
}
NOKPROBE_SYMBOL(kgdb_skipexception);

#ifdef CONFIG_SMP

/*
 * Default (weak) implementation for kgdb_roundup_cpus
 */

static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd);

void __weak kgdb_call_nmi_hook(void *ignored)
{
	/*
	 * NOTE: get_irq_regs() is supposed to get the registers from
	 * before the IPI interrupt happened and so is supposed to
	 * show where the processor was.  In some situations it's
	 * possible we might be called without an IPI, so it might be
	 * safer to figure out how to make kgdb_breakpoint() work
	 * properly here.
	 */
	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}
NOKPROBE_SYMBOL(kgdb_call_nmi_hook);

void __weak kgdb_roundup_cpus(void)
{
	call_single_data_t *csd;
	int this_cpu = raw_smp_processor_id();
	int cpu;
	int ret;

	for_each_online_cpu(cpu) {
		/* No need to roundup ourselves */
		if (cpu == this_cpu)
			continue;

		csd = &per_cpu(kgdb_roundup_csd, cpu);

		/*
		 * If it didn't round up last time, don't try again
		 * since smp_call_function_single_async() will block.
		 *
		 * If rounding_up is false then we know that the
		 * previous call must have at least started and that
		 * means smp_call_function_single_async() won't block.
		 */
		if (kgdb_info[cpu].rounding_up)
			continue;
		kgdb_info[cpu].rounding_up = true;

		csd->func = kgdb_call_nmi_hook;
		ret = smp_call_function_single_async(cpu, csd);
		if (ret)
			kgdb_info[cpu].rounding_up = false;
	}
}
NOKPROBE_SYMBOL(kgdb_roundup_cpus);

#endif

/*
 * Some architectures need cache flushes when we set/clear a
 * breakpoint:
 */
static void kgdb_flush_swbreak_addr(unsigned long addr)
{
	if (!CACHE_FLUSH_IS_SAFE)
		return;

	if (current->mm) {
		int i;

		for (i = 0; i < VMACACHE_SIZE; i++) {
			if (!current->vmacache.vmas[i])
				continue;
			flush_cache_range(current->vmacache.vmas[i],
					  addr, addr + BREAK_INSTR_SIZE);
		}
	}

	/* Force flush instruction cache if it was outside the mm */
	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
}
NOKPROBE_SYMBOL(kgdb_flush_swbreak_addr);

/*
 * SW breakpoint management:
 */
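
/*
 * Write the architecture's breakpoint instruction into every breakpoint
 * gdb has requested (state BP_SET) and mark it BP_ACTIVE.  Called just
 * before the system is resumed so the planted breakpoints take effect.
 */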
int dbg_activate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_SET)
			continue;

		error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
		if (error) {
			ret = error;
			pr_info("BP install failed: %lx\n",
				kgdb_break[i].bpt_addr);
			continue;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_ACTIVE;
	}
	return ret;
}
NOKPROBE_SYMBOL(dbg_activate_sw_breakpoints);
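
/*
 * Record a new software breakpoint at @addr in the kgdb_break[] table.
 * The address is only validated here; the breakpoint instruction itself
 * is written later by dbg_activate_sw_breakpoints().
 */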
int dbg_set_sw_break(unsigned long addr)
{
	int err = kgdb_validate_break_address(addr);
	int breakno = -1;
	int i;

	if (err)
		return err;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
		    (kgdb_break[i].bpt_addr == addr))
			return -EEXIST;
	}
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state == BP_REMOVED &&
		    kgdb_break[i].bpt_addr == addr) {
			breakno = i;
			break;
		}
	}

	if (breakno == -1) {
		for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
			if (kgdb_break[i].state == BP_UNDEFINED) {
				breakno = i;
				break;
			}
		}
	}

	if (breakno == -1)
		return -E2BIG;

	kgdb_break[breakno].state = BP_SET;
	kgdb_break[breakno].type = BP_BREAKPOINT;
	kgdb_break[breakno].bpt_addr = addr;

	return 0;
}
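
/*
 * Restore the original instruction for every BP_ACTIVE breakpoint and
 * drop it back to BP_SET, so the kernel text is clean while the
 * debugger has control of the system.
 */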
int dbg_deactivate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			continue;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error) {
			pr_info("BP remove failed: %lx\n",
				kgdb_break[i].bpt_addr);
			ret = error;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_SET;
	}
	return ret;
}
NOKPROBE_SYMBOL(dbg_deactivate_sw_breakpoints);

int dbg_remove_sw_break(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
		    (kgdb_break[i].bpt_addr == addr)) {
			kgdb_break[i].state = BP_REMOVED;
			return 0;
		}
	}
	return -ENOENT;
}

int kgdb_isremovedbreak(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_REMOVED) &&
		    (kgdb_break[i].bpt_addr == addr))
			return 1;
	}
	return 0;
}

int kgdb_has_hit_break(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state == BP_ACTIVE &&
		    kgdb_break[i].bpt_addr == addr)
			return 1;
	}
	return 0;
}

int dbg_remove_all_break(void)
{
	int error;
	int i;

	/* Clear memory breakpoints. */
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			goto setundefined;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error)
			pr_err("breakpoint remove failed: %lx\n",
			       kgdb_break[i].bpt_addr);
setundefined:
		kgdb_break[i].state = BP_UNDEFINED;
	}

	/* Clear hardware breakpoints. */
	if (arch_kgdb_ops.remove_all_hw_break)
		arch_kgdb_ops.remove_all_hw_break();

	return 0;
}
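
/*
 * Forget any breakpoints that were planted in __init text: once init
 * memory has been freed those addresses are no longer valid kernel
 * text, so the slots are simply marked BP_UNDEFINED again.
 */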
void kgdb_free_init_mem(void)
{
	int i;

	/* Clear init memory breakpoints. */
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (init_section_contains((void *)kgdb_break[i].bpt_addr, 0))
			kgdb_break[i].state = BP_UNDEFINED;
	}
}

#ifdef CONFIG_KGDB_KDB
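/*
 * Dump the stack of the task stopped on @cpu.  For the local CPU this
 * is a plain dump_stack(); for a remote CPU the request is handed to
 * that CPU's slave loop via the DCPU_WANT_BT flag, as explained in the
 * comment inside the function.
 */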
void kdb_dump_stack_on_cpu(int cpu)
{
	if (cpu == raw_smp_processor_id() || !IS_ENABLED(CONFIG_SMP)) {
		dump_stack();
		return;
	}

	if (!(kgdb_info[cpu].exception_state & DCPU_IS_SLAVE)) {
		kdb_printf("ERROR: Task on cpu %d didn't stop in the debugger\n",
			   cpu);
		return;
	}

	/*
	 * In general, architectures don't support dumping the stack of a
	 * "running" process that's not the current one.  From the point of
	 * view of the Linux kernel, processes that are looping in the kgdb
	 * slave loop are still "running".  There's also no API (that actually
	 * works across all architectures) that can do a stack crawl based
	 * on registers passed as a parameter.
	 *
	 * Solve this conundrum by asking slave CPUs to do the backtrace
	 * themselves.
	 */
	kgdb_info[cpu].exception_state |= DCPU_WANT_BT;
	while (kgdb_info[cpu].exception_state & DCPU_WANT_BT)
		cpu_relax();
}
#endif

/*
 * Return true if there is a valid kgdb I/O module.  Also if no
 * debugger is attached a message can be printed to the console about
 * waiting for the debugger to attach.
 *
 * The print_wait argument is only to be true when called from inside
 * the core kgdb_handle_exception, because it will wait for the
 * debugger to attach.
 */
static int kgdb_io_ready(int print_wait)
{
	if (!dbg_io_ops)
		return 0;
	if (kgdb_connected)
		return 1;
	if (atomic_read(&kgdb_setting_breakpoint))
		return 1;
	if (print_wait) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			pr_crit("waiting... or $3#33 for KDB\n");
#else
		pr_crit("Waiting for remote debugger\n");
#endif
	}
	return 1;
}
NOKPROBE_SYMBOL(kgdb_io_ready);
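
/*
 * Detect recursive entry into the debugger on the CPU that already owns
 * it.  If the fault came from a breakpoint the user planted inside code
 * KGDB itself needs, the breakpoint is removed and execution continues
 * with a warning; any deeper recursion is fatal.
 */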
static int kgdb_reenter_check(struct kgdb_state *ks)
{
	unsigned long addr;

	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
		return 0;

	/* Panic on recursive debugger calls: */
	exception_level++;
	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	dbg_deactivate_sw_breakpoints();

	/*
	 * If the breakpoint was removed successfully at the place the
	 * exception occurred, try to recover and print a warning to the
	 * end user because the user planted a breakpoint in a place that
	 * KGDB needs in order to function.
	 */
	if (dbg_remove_sw_break(addr) == 0) {
		exception_level = 0;
		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
		dbg_activate_sw_breakpoints();
		pr_crit("re-enter error: breakpoint removed %lx\n", addr);
		WARN_ON_ONCE(1);

		return 1;
	}
	dbg_remove_all_break();
	kgdb_skipexception(ks->ex_vector, ks->linux_regs);

	if (exception_level > 1) {
		dump_stack();
		kgdb_io_module_registered = false;
		panic("Recursive entry to debugger");
	}

	pr_crit("re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
	/* Allow kdb to debug itself one level */
	return 0;
#endif
	dump_stack();
	panic("Recursive entry to debugger");

	return 1;
}
NOKPROBE_SYMBOL(kgdb_reenter_check);

static void dbg_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
}
NOKPROBE_SYMBOL(dbg_touch_watchdogs);
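
/*
 * Main parking loop for every CPU that enters the debug core.  A CPU
 * arriving with DCPU_WANT_MASTER competes for dbg_master_lock and, once
 * it wins, runs kdb_stub()/gdb_serial_stub(); a CPU arriving with
 * DCPU_IS_SLAVE spins here until the master releases dbg_slave_lock.
 * For the master the stub's ret_state is returned to the caller.
 */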
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
			  int exception_state)
{
	unsigned long flags;
	int sstep_tries = 100;
	int error;
	int cpu;
	int trace_on = 0;
	int online_cpus = num_online_cpus();
	u64 time_left;

	kgdb_info[ks->cpu].enter_kgdb++;
	kgdb_info[ks->cpu].exception_state |= exception_state;

	if (exception_state == DCPU_WANT_MASTER)
		atomic_inc(&masters_in_kgdb);
	else
		atomic_inc(&slaves_in_kgdb);

	if (arch_kgdb_ops.disable_hw_break)
		arch_kgdb_ops.disable_hw_break(regs);

acquirelock:
	rcu_read_lock();
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;

	/* Make sure the above info reaches the primary CPU */
	smp_mb();

	if (exception_level == 1) {
		if (raw_spin_trylock(&dbg_master_lock))
			atomic_xchg(&kgdb_active, cpu);
		goto cpu_master_loop;
	}

	/*
	 * CPU will loop if it is a slave or request to become a kgdb
	 * master cpu and acquire the kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			if (raw_spin_trylock(&dbg_master_lock)) {
				atomic_xchg(&kgdb_active, cpu);
				break;
			}
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_BT) {
			dump_stack();
			kgdb_info[cpu].exception_state &= ~DCPU_WANT_BT;
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			if (!raw_spin_is_locked(&dbg_slave_lock))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			kgdb_info[cpu].debuggerinfo = NULL;
			kgdb_info[cpu].task = NULL;
			kgdb_info[cpu].exception_state &=
				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
			kgdb_info[cpu].enter_kgdb--;
			smp_mb__before_atomic();
			atomic_dec(&slaves_in_kgdb);
			dbg_touch_watchdogs();
			local_irq_restore(flags);
			rcu_read_unlock();
			return 0;
		}
		cpu_relax();
	}

	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping.  To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		atomic_set(&kgdb_active, -1);
		raw_spin_unlock(&dbg_master_lock);
		dbg_touch_watchdogs();
		local_irq_restore(flags);
		rcu_read_unlock();

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	atomic_inc(&ignore_console_lock_warning);

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();

	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPU in a spin state while the debugger is active
	 */
	if (!kgdb_single_step)
		raw_spin_lock(&dbg_slave_lock);

#ifdef CONFIG_SMP
	/* If send_ready set, slaves are already waiting */
	if (ks->send_ready)
		atomic_set(ks->send_ready, 1);

	/* Signal the other CPUs to enter kgdb_wait() */
	else if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus();
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 */
	time_left = MSEC_PER_SEC;
	while (kgdb_do_roundup && --time_left &&
	       (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
		   online_cpus)
		udelay(1000);
	if (!time_left)
		pr_crit("Timed out waiting for secondary CPUs.\n");

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			if (error == -1)
				continue;
			kgdb_connected = 0;
		} else {
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			kgdb_info[dbg_switch_cpu].exception_state |=
				DCPU_NEXT_MASTER;
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	dbg_activate_sw_breakpoints();

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	atomic_dec(&ignore_console_lock_warning);

	if (!kgdb_single_step) {
		raw_spin_unlock(&dbg_slave_lock);
		/* Wait till all the CPUs have quit from the debugger. */
		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
			cpu_relax();
	}

kgdb_restore:
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);

		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	if (trace_on)
		tracing_on();

	kgdb_info[cpu].debuggerinfo = NULL;
	kgdb_info[cpu].task = NULL;
	kgdb_info[cpu].exception_state &=
		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
	kgdb_info[cpu].enter_kgdb--;
	smp_mb__before_atomic();
	atomic_dec(&masters_in_kgdb);
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	raw_spin_unlock(&dbg_master_lock);
	dbg_touch_watchdogs();
	local_irq_restore(flags);
	rcu_read_unlock();

	return kgdb_info[cpu].ret_state;
}
NOKPROBE_SYMBOL(kgdb_cpu_enter);

/*
 * kgdb_handle_exception() - main entry point from a kernel exception
 *
 * Locking hierarchy:
 *	interface locks, if any (begin_session)
 *	kgdb lock (kgdb_active)
 */
int
kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;
	int ret = 0;

	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(0);
	/*
	 * Avoid entering the debugger if we were triggered due to an oops
	 * but panic_timeout indicates the system should automatically
	 * reboot on panic.  We don't want to get stuck waiting for input
	 * on such systems, especially if it's "just" an oops.
	 */
	if (signo != SIGTRAP && panic_timeout)
		return 1;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu = raw_smp_processor_id();
	ks->ex_vector = evector;
	ks->signo = signo;
	ks->err_code = ecode;
	ks->linux_regs = regs;

	if (kgdb_reenter_check(ks))
		goto out; /* Ouch, double exception ! */
	if (kgdb_info[ks->cpu].enter_kgdb != 0)
		goto out;

	ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
out:
	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(1);

	return ret;
}
NOKPROBE_SYMBOL(kgdb_handle_exception);

/*
 * GDB places a breakpoint at this function to learn about dynamically
 * loaded objects.
 */
static int module_event(struct notifier_block *self, unsigned long val,
			void *data)
{
	return 0;
}

static struct notifier_block dbg_module_load_nb = {
	.notifier_call = module_event,
};
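
/*
 * Entry point for a CPU that has been rounded up (typically from the
 * IPI handler).  If a master CPU already holds dbg_master_lock, this
 * CPU parks itself in kgdb_cpu_enter() as a slave and 0 is returned;
 * otherwise the call is ignored and 1 is returned.
 */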
int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;

	kgdb_info[cpu].rounding_up = false;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu = cpu;
	ks->linux_regs = regs;

	if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
	    raw_spin_is_locked(&dbg_master_lock)) {
		kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
		return 0;
	}
#endif
	return 1;
}
NOKPROBE_SYMBOL(kgdb_nmicallback);
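
/*
 * Variant of the exception entry used by callers (such as NMI entry
 * paths) that round up the other CPUs themselves: instead of issuing
 * the normal roundup, the master sets *@send_ready to 1 once it is in
 * the debugger.  Returns 0 if the debugger was entered, 1 otherwise.
 */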
int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
		   atomic_t *send_ready)
{
#ifdef CONFIG_SMP
	if (!kgdb_io_ready(0) || !send_ready)
		return 1;

	if (kgdb_info[cpu].enter_kgdb == 0) {
		struct kgdb_state kgdb_var;
		struct kgdb_state *ks = &kgdb_var;

		memset(ks, 0, sizeof(struct kgdb_state));
		ks->cpu = cpu;
		ks->ex_vector = trapnr;
		ks->signo = SIGTRAP;
		ks->err_code = err_code;
		ks->linux_regs = regs;
		ks->send_ready = send_ready;
		kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
		return 0;
	}
#endif
	return 1;
}
NOKPROBE_SYMBOL(kgdb_nmicallin);
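
/*
 * Console write hook used when the kgdb console is registered: forward
 * kernel console output to the attached gdb via gdbstub_msg_write(),
 * but only while gdb is connected and we are not already sitting in
 * the debugger.
 */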
static void kgdb_console_write(struct console *co, const char *s,
			       unsigned count)
{
	unsigned long flags;

	/* If we're debugging, or KGDB has not connected, don't try
	 * and print. */
	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
		return;

	local_irq_save(flags);
	gdbstub_msg_write(s, count);
	local_irq_restore(flags);
}

static struct console kgdbcons = {
	.name = "kgdb",
	.write = kgdb_console_write,
	.flags = CON_PRINTBUFFER | CON_ENABLED,
	.index = -1,
};

static int __init opt_kgdb_con(char *str)
{
	kgdb_use_con = 1;

	if (kgdb_io_module_registered && !kgdb_con_registered) {
		register_console(&kgdbcons);
		kgdb_con_registered = 1;
	}

	return 0;
}

early_param("kgdbcon", opt_kgdb_con);

#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_dbg(int key)
{
	if (!dbg_io_ops) {
		pr_crit("ERROR: No KGDB I/O module available\n");
		return;
	}
	if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			pr_crit("KGDB or $3#33 for KDB\n");
#else
		pr_crit("Entering KGDB\n");
#endif
	}

	kgdb_breakpoint();
}

static const struct sysrq_key_op sysrq_dbg_op = {
	.handler = sysrq_handle_dbg,
	.help_msg = "debug(g)",
	.action_msg = "DEBUG",
};
#endif
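
/*
 * Called on panic.  Drop into the debugger so the crash can be
 * inspected, unless no I/O module is registered or panic_timeout
 * requests an automatic reboot instead.
 */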
void kgdb_panic(const char *msg)
{
	if (!kgdb_io_module_registered)
		return;

	/*
	 * We don't want to get stuck waiting for input from user if
	 * "panic_timeout" indicates the system should automatically
	 * reboot on panic.
	 */
	if (panic_timeout)
		return;

	if (dbg_kdb_mode)
		kdb_printf("PANIC: %s\n", msg);

	kgdb_breakpoint();
}

static void kgdb_initial_breakpoint(void)
{
	kgdb_break_asap = 0;

	pr_crit("Waiting for connection from remote gdb...\n");
	kgdb_breakpoint();
}

void __weak kgdb_arch_late(void)
{
}
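
/*
 * Late (post early-boot) initialization of the debug core: finish the
 * architecture setup, bring kdb fully up, and take the initial
 * "kgdbwait" breakpoint if one was requested and an I/O module is
 * already registered.
 */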
void __init dbg_late_init(void)
{
	dbg_is_early = false;
	if (kgdb_io_module_registered)
		kgdb_arch_late();
	kdb_init(KDB_INIT_FULL);

	if (kgdb_io_module_registered && kgdb_break_asap)
		kgdb_initial_breakpoint();
}

static int
dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
{
	/*
	 * Take the following action on reboot notify depending on value:
	 *    1 == Enter debugger
	 *    0 == [the default] detach debug client
	 *   -1 == Do nothing... and use this until the board resets
	 */
	switch (kgdbreboot) {
	case 1:
		kgdb_breakpoint();
		fallthrough;
	case -1:
		goto done;
	}
	if (!dbg_kdb_mode)
		gdbstub_exit(code);
done:
	return NOTIFY_DONE;
}

static struct notifier_block dbg_reboot_notifier = {
	.notifier_call = dbg_notify_reboot,
	.next = NULL,
	.priority = INT_MAX,
};

static void kgdb_register_callbacks(void)
{
	if (!kgdb_io_module_registered) {
		kgdb_io_module_registered = 1;
		kgdb_arch_init();
		if (!dbg_is_early)
			kgdb_arch_late();
		register_module_notifier(&dbg_module_load_nb);
		register_reboot_notifier(&dbg_reboot_notifier);
#ifdef CONFIG_MAGIC_SYSRQ
		register_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_use_con && !kgdb_con_registered) {
			register_console(&kgdbcons);
			kgdb_con_registered = 1;
		}
	}
}

static void kgdb_unregister_callbacks(void)
{
	/*
	 * When this routine is called KGDB should unregister from
	 * handlers and clean up, making sure it is not handling any
	 * break exceptions at the time.
	 */
	if (kgdb_io_module_registered) {
		kgdb_io_module_registered = 0;
		unregister_reboot_notifier(&dbg_reboot_notifier);
		unregister_module_notifier(&dbg_module_load_nb);
		kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
		unregister_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_con_registered) {
			unregister_console(&kgdbcons);
			kgdb_con_registered = 0;
		}
	}
}

/*
 * There are times a tasklet needs to be used instead of a compiled-in
 * breakpoint so as to cause an exception outside a kgdb I/O module,
 * such as is the case with kgdboe, where calling a breakpoint in the
 * I/O driver itself would be fatal.
 */
static void kgdb_tasklet_bpt(unsigned long ing)
{
	kgdb_breakpoint();
	atomic_set(&kgdb_break_tasklet_var, 0);
}

static DECLARE_TASKLET_OLD(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt);

void kgdb_schedule_breakpoint(void)
{
	if (atomic_read(&kgdb_break_tasklet_var) ||
	    atomic_read(&kgdb_active) != -1 ||
	    atomic_read(&kgdb_setting_breakpoint))
		return;
	atomic_inc(&kgdb_break_tasklet_var);
	tasklet_schedule(&kgdb_tasklet_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);

/**
 * kgdb_register_io_module - register KGDB IO module
 * @new_dbg_io_ops: the io ops vector
 *
 * Register it with the KGDB core.
 */
int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
{
	struct kgdb_io *old_dbg_io_ops;
	int err;

	spin_lock(&kgdb_registration_lock);

	old_dbg_io_ops = dbg_io_ops;
	if (old_dbg_io_ops) {
		if (!old_dbg_io_ops->deinit) {
			spin_unlock(&kgdb_registration_lock);

			pr_err("KGDB I/O driver %s can't replace %s.\n",
			       new_dbg_io_ops->name, old_dbg_io_ops->name);
			return -EBUSY;
		}
		pr_info("Replacing I/O driver %s with %s\n",
			old_dbg_io_ops->name, new_dbg_io_ops->name);
	}

	if (new_dbg_io_ops->init) {
		err = new_dbg_io_ops->init();
		if (err) {
			spin_unlock(&kgdb_registration_lock);
			return err;
		}
	}

	dbg_io_ops = new_dbg_io_ops;

	spin_unlock(&kgdb_registration_lock);

	if (old_dbg_io_ops) {
		old_dbg_io_ops->deinit();
		return 0;
	}

	pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);

	/* Arm KGDB now. */
	kgdb_register_callbacks();

	if (kgdb_break_asap &&
	    (!dbg_is_early || IS_ENABLED(CONFIG_ARCH_HAS_EARLY_DEBUG)))
		kgdb_initial_breakpoint();

	return 0;
}
EXPORT_SYMBOL_GPL(kgdb_register_io_module);

/**
 * kgdb_unregister_io_module - unregister KGDB IO module
 * @old_dbg_io_ops: the io ops vector
 *
 * Unregister it with the KGDB core.
 */
void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
{
	BUG_ON(kgdb_connected);

	/*
	 * KGDB is no longer able to communicate out, so
	 * unregister our callbacks and reset state.
	 */
	kgdb_unregister_callbacks();

	spin_lock(&kgdb_registration_lock);

	WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
	dbg_io_ops = NULL;

	spin_unlock(&kgdb_registration_lock);

	if (old_dbg_io_ops->deinit)
		old_dbg_io_ops->deinit();

	pr_info("Unregistered I/O driver %s, debugger disabled\n",
		old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
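
/*
 * Read one character from the registered I/O driver.  NO_POLL_CHAR is
 * mapped to -1, and in kdb mode a DEL (127) from the terminal is
 * translated to backspace (8) so line editing behaves as expected.
 */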
int dbg_io_get_char(void)
{
	int ret = dbg_io_ops->read_char();

	if (ret == NO_POLL_CHAR)
		return -1;
	if (!dbg_kdb_mode)
		return ret;
	if (ret == 127)
		return 8;
	return ret;
}

/**
 * kgdb_breakpoint - generate breakpoint exception
 *
 * This function will generate a breakpoint exception.  It is used at the
 * beginning of a program to sync up with a debugger and can be used
 * otherwise as a quick means to stop program execution and "break" into
 * the debugger.
 */
noinline void kgdb_breakpoint(void)
{
	atomic_inc(&kgdb_setting_breakpoint);
	wmb(); /* Sync point before breakpoint */
	arch_kgdb_breakpoint();
	wmb(); /* Sync point after breakpoint */
	atomic_dec(&kgdb_setting_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_breakpoint);
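
/*
 * "kgdbwait" on the kernel command line requests a breakpoint as early
 * as possible.  The break is taken here only when the architecture
 * supports early debug and an I/O module is already registered;
 * otherwise it is deferred to dbg_late_init() or to I/O module
 * registration.
 */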
static int __init opt_kgdb_wait(char *str)
{
	kgdb_break_asap = 1;

	kdb_init(KDB_INIT_EARLY);
	if (kgdb_io_module_registered &&
	    IS_ENABLED(CONFIG_ARCH_HAS_EARLY_DEBUG))
		kgdb_initial_breakpoint();

	return 0;
}

early_param("kgdbwait", opt_kgdb_wait);