mce.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Machine check exception handling.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#undef DEBUG
#define pr_fmt(fmt) "mce: " fmt

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/extable.h>
#include <linux/ftrace.h>

#include <asm/machdep.h>
#include <asm/mce.h>
#include <asm/nmi.h>
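
/*
 * Per-CPU nesting depth and event buffer for machine checks taken in the
 * real-mode exception path; nested machine checks occupy successive slots.
 */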
static DEFINE_PER_CPU(int, mce_nest_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);

/* Queue for delayed MCE events. */
static DEFINE_PER_CPU(int, mce_queue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);

/* Queue for delayed MCE UE events. */
static DEFINE_PER_CPU(int, mce_ue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
                                        mce_ue_event_queue);

static void machine_check_process_queued_event(struct irq_work *work);
static void machine_check_ue_irq_work(struct irq_work *work);
static void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

static struct irq_work mce_event_process_work = {
        .func = machine_check_process_queued_event,
};

static struct irq_work mce_ue_event_irq_work = {
        .func = machine_check_ue_irq_work,
};

DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
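
/*
 * Notifier chain run in process context for each queued UE event
 * (see machine_process_ue_event() below).
 */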
static BLOCKING_NOTIFIER_HEAD(mce_notifier_list);

int mce_register_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_register_notifier);

int mce_unregister_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&mce_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(mce_unregister_notifier);
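
/* Copy the type-specific error subtype into the matching union member. */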
static void mce_set_error_info(struct machine_check_event *mce,
                               struct mce_error_info *mce_err)
{
        mce->error_type = mce_err->error_type;
        switch (mce_err->error_type) {
        case MCE_ERROR_TYPE_UE:
                mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
                break;
        case MCE_ERROR_TYPE_SLB:
                mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
                break;
        case MCE_ERROR_TYPE_ERAT:
                mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
                break;
        case MCE_ERROR_TYPE_TLB:
                mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
                break;
        case MCE_ERROR_TYPE_USER:
                mce->u.user_error.user_error_type = mce_err->u.user_error_type;
                break;
        case MCE_ERROR_TYPE_RA:
                mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
                break;
        case MCE_ERROR_TYPE_LINK:
                mce->u.link_error.link_error_type = mce_err->u.link_error_type;
                break;
        case MCE_ERROR_TYPE_UNKNOWN:
        default:
                break;
        }
}

/*
 * Decode and save high level MCE information into the per-CPU buffer,
 * which is an array of machine_check_event structures.
 */
void save_mce_event(struct pt_regs *regs, long handled,
                    struct mce_error_info *mce_err,
                    uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
        int index = __this_cpu_inc_return(mce_nest_count) - 1;
        struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);

        /*
         * Return if we don't have enough space to log mce event.
         * mce_nest_count may go beyond MAX_MC_EVT but that's ok,
         * the check below will stop buffer overrun.
         */
        if (index >= MAX_MC_EVT)
                return;

        /* Populate generic machine check info */
        mce->version = MCE_V1;
        mce->srr0 = nip;
        mce->srr1 = regs->msr;
        mce->gpr3 = regs->gpr[3];
        mce->in_use = 1;
        mce->cpu = get_paca()->paca_index;

        /* Mark it recovered if we have handled it and MSR(RI=1). */
        if (handled && (regs->msr & MSR_RI))
                mce->disposition = MCE_DISPOSITION_RECOVERED;
        else
                mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;

        mce->initiator = mce_err->initiator;
        mce->severity = mce_err->severity;
        mce->sync_error = mce_err->sync_error;
        mce->error_class = mce_err->error_class;

        /*
         * Populate the mce error_type and type-specific error_type.
         */
        mce_set_error_info(mce, mce_err);

        if (!addr)
                return;

        if (mce->error_type == MCE_ERROR_TYPE_TLB) {
                mce->u.tlb_error.effective_address_provided = true;
                mce->u.tlb_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
                mce->u.slb_error.effective_address_provided = true;
                mce->u.slb_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
                mce->u.erat_error.effective_address_provided = true;
                mce->u.erat_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_USER) {
                mce->u.user_error.effective_address_provided = true;
                mce->u.user_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_RA) {
                mce->u.ra_error.effective_address_provided = true;
                mce->u.ra_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
                mce->u.link_error.effective_address_provided = true;
                mce->u.link_error.effective_address = addr;
        } else if (mce->error_type == MCE_ERROR_TYPE_UE) {
                mce->u.ue_error.effective_address_provided = true;
                mce->u.ue_error.effective_address = addr;
                if (phys_addr != ULONG_MAX) {
                        mce->u.ue_error.physical_address_provided = true;
                        mce->u.ue_error.physical_address = phys_addr;
                        mce->u.ue_error.ignore_event = mce_err->ignore_event;
                        machine_check_ue_event(mce);
                }
        }
}

/*
 * get_mce_event:
 *      mce     Pointer to machine_check_event structure to be filled.
 *      release Flag to indicate whether to free the event slot or not.
 *              0 <= do not release the mce event. Caller will invoke
 *                   release_mce_event() once event has been consumed.
 *              1 <= release the slot.
 *
 *      return  1 = success
 *              0 = failure
 *
 * get_mce_event() will be called by platform-specific machine check
 * handler routines and in KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not be scheduled until the ret_from_except() routine
 * is called.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
        int index = __this_cpu_read(mce_nest_count) - 1;
        struct machine_check_event *mc_evt;
        int ret = 0;

        /* Sanity check */
        if (index < 0)
                return ret;

        /* Check if we have MCE info to process. */
        if (index < MAX_MC_EVT) {
                mc_evt = this_cpu_ptr(&mce_event[index]);
                /* Copy the event structure and release the original */
                if (mce)
                        *mce = *mc_evt;
                if (release)
                        mc_evt->in_use = 0;
                ret = 1;
        }
        /* Decrement the count to free the slot. */
        if (release)
                __this_cpu_dec(mce_nest_count);

        return ret;
}
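
/* Drop the most recently saved MCE event without copying it out. */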
void release_mce_event(void)
{
        get_mce_event(NULL, true);
}
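
/*
 * irq_work handler: defer UE event processing from the machine check
 * path to a workqueue, which runs in process context.
 */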
static void machine_check_ue_irq_work(struct irq_work *work)
{
        schedule_work(&mce_ue_event_work);
}

/*
 * Queue up the MCE UE event so that it can be handled later.
 */
static void machine_check_ue_event(struct machine_check_event *evt)
{
        int index;

        index = __this_cpu_inc_return(mce_ue_count) - 1;
        /* If queue is full, just return for now. */
        if (index >= MAX_MC_EVT) {
                __this_cpu_dec(mce_ue_count);
                return;
        }
        memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));

        /* Queue work to process this event later. */
        irq_work_queue(&mce_ue_event_irq_work);
}

/*
 * Queue up the MCE event so that it can be handled later.
 */
void machine_check_queue_event(void)
{
        int index;
        struct machine_check_event evt;

        if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
                return;

        index = __this_cpu_inc_return(mce_queue_count) - 1;
        /* If queue is full, just return for now. */
        if (index >= MAX_MC_EVT) {
                __this_cpu_dec(mce_queue_count);
                return;
        }
        memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));

        /* Queue irq work to process this event later. */
        irq_work_queue(&mce_event_process_work);
}
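
/*
 * If the faulting instruction has an exception table fixup, redirect NIP
 * to the fixup and flag the event so that reporting is skipped: the fixup
 * path does its own error handling.
 */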
void mce_common_process_ue(struct pt_regs *regs,
                           struct mce_error_info *mce_err)
{
        const struct exception_table_entry *entry;

        entry = search_kernel_exception_table(regs->nip);
        if (entry) {
                mce_err->ignore_event = true;
                regs->nip = extable_fixup(entry);
        }
}

/*
 * Process pending MCE UE events from the mce_ue_event_queue. This runs
 * from the workqueue, in process context.
 */
static void machine_process_ue_event(struct work_struct *work)
{
        int index;
        struct machine_check_event *evt;

        while (__this_cpu_read(mce_ue_count) > 0) {
                index = __this_cpu_read(mce_ue_count) - 1;
                evt = this_cpu_ptr(&mce_ue_event_queue[index]);
                blocking_notifier_call_chain(&mce_notifier_list, 0, evt);
#ifdef CONFIG_MEMORY_FAILURE
                /*
                 * This should probably be queued elsewhere, but oh well.
                 *
                 * Don't report this machine check because the caller has
                 * asked us to ignore the event; it has a fixup handler
                 * which will do the appropriate error handling and
                 * reporting.
                 */
                if (evt->error_type == MCE_ERROR_TYPE_UE) {
                        if (evt->u.ue_error.ignore_event) {
                                __this_cpu_dec(mce_ue_count);
                                continue;
                        }

                        if (evt->u.ue_error.physical_address_provided) {
                                unsigned long pfn;

                                pfn = evt->u.ue_error.physical_address >>
                                        PAGE_SHIFT;
                                memory_failure(pfn, 0);
                        } else
                                pr_warn("Failed to identify bad address from where the uncorrectable error (UE) was generated\n");
                }
#endif
                __this_cpu_dec(mce_ue_count);
        }
}

/*
 * Process pending MCE events from the mce_event_queue. This runs via
 * irq_work, once it is safe to take interrupts again.
 */
static void machine_check_process_queued_event(struct irq_work *work)
{
        int index;
        struct machine_check_event *evt;

        add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

        /*
         * For now just print it to console.
         * TODO: log this error event to FSP or nvram.
         */
        while (__this_cpu_read(mce_queue_count) > 0) {
                index = __this_cpu_read(mce_queue_count) - 1;
                evt = this_cpu_ptr(&mce_event_queue[index]);

                if (evt->error_type == MCE_ERROR_TYPE_UE &&
                    evt->u.ue_error.ignore_event) {
                        __this_cpu_dec(mce_queue_count);
                        continue;
                }
                machine_check_print_event_info(evt, false, false);
                __this_cpu_dec(mce_queue_count);
        }
}
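
/*
 * Pretty-print one machine check event to the console: severity,
 * initiator, decoded error type/subtype, effective/physical addresses
 * and the recovery disposition.
 */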
void machine_check_print_event_info(struct machine_check_event *evt,
                                    bool user_mode, bool in_guest)
{
        const char *level, *sevstr, *subtype, *err_type, *initiator;
        uint64_t ea = 0, pa = 0;
        int n = 0;
        char dar_str[50];
        char pa_str[50];
        static const char *mc_ue_types[] = {
                "Indeterminate",
                "Instruction fetch",
                "Page table walk ifetch",
                "Load/Store",
                "Page table walk Load/Store",
        };
        static const char *mc_slb_types[] = {
                "Indeterminate",
                "Parity",
                "Multihit",
        };
        static const char *mc_erat_types[] = {
                "Indeterminate",
                "Parity",
                "Multihit",
        };
        static const char *mc_tlb_types[] = {
                "Indeterminate",
                "Parity",
                "Multihit",
        };
        static const char *mc_user_types[] = {
                "Indeterminate",
                "tlbie(l) invalid",
                "scv invalid",
        };
        static const char *mc_ra_types[] = {
                "Indeterminate",
                "Instruction fetch (bad)",
                "Instruction fetch (foreign)",
                "Page table walk ifetch (bad)",
                "Page table walk ifetch (foreign)",
                "Load (bad)",
                "Store (bad)",
                "Page table walk Load/Store (bad)",
                "Page table walk Load/Store (foreign)",
                "Load/Store (foreign)",
        };
        static const char *mc_link_types[] = {
                "Indeterminate",
                "Instruction fetch (timeout)",
                "Page table walk ifetch (timeout)",
                "Load (timeout)",
                "Store (timeout)",
                "Page table walk Load/Store (timeout)",
        };
        static const char *mc_error_class[] = {
                "Unknown",
                "Hardware error",
                "Probable Hardware error (some chance of software cause)",
                "Software error",
                "Probable Software error (some chance of hardware cause)",
        };

        /* Print things out */
        if (evt->version != MCE_V1) {
                pr_err("Machine Check Exception, Unknown event version %d !\n",
                       evt->version);
                return;
        }
        switch (evt->severity) {
        case MCE_SEV_NO_ERROR:
                level = KERN_INFO;
                sevstr = "Harmless";
                break;
        case MCE_SEV_WARNING:
                level = KERN_WARNING;
                sevstr = "Warning";
                break;
        case MCE_SEV_SEVERE:
                level = KERN_ERR;
                sevstr = "Severe";
                break;
        case MCE_SEV_FATAL:
        default:
                level = KERN_ERR;
                sevstr = "Fatal";
                break;
        }

        switch (evt->initiator) {
        case MCE_INITIATOR_CPU:
                initiator = "CPU";
                break;
        case MCE_INITIATOR_PCI:
                initiator = "PCI";
                break;
        case MCE_INITIATOR_ISA:
                initiator = "ISA";
                break;
        case MCE_INITIATOR_MEMORY:
                initiator = "Memory";
                break;
        case MCE_INITIATOR_POWERMGM:
                initiator = "Power Management";
                break;
        case MCE_INITIATOR_UNKNOWN:
        default:
                initiator = "Unknown";
                break;
        }

        switch (evt->error_type) {
        case MCE_ERROR_TYPE_UE:
                err_type = "UE";
                subtype = evt->u.ue_error.ue_error_type <
                        ARRAY_SIZE(mc_ue_types) ?
                        mc_ue_types[evt->u.ue_error.ue_error_type]
                        : "Unknown";
                if (evt->u.ue_error.effective_address_provided)
                        ea = evt->u.ue_error.effective_address;
                if (evt->u.ue_error.physical_address_provided)
                        pa = evt->u.ue_error.physical_address;
                break;
        case MCE_ERROR_TYPE_SLB:
                err_type = "SLB";
                subtype = evt->u.slb_error.slb_error_type <
                        ARRAY_SIZE(mc_slb_types) ?
                        mc_slb_types[evt->u.slb_error.slb_error_type]
                        : "Unknown";
                if (evt->u.slb_error.effective_address_provided)
                        ea = evt->u.slb_error.effective_address;
                break;
        case MCE_ERROR_TYPE_ERAT:
                err_type = "ERAT";
                subtype = evt->u.erat_error.erat_error_type <
                        ARRAY_SIZE(mc_erat_types) ?
                        mc_erat_types[evt->u.erat_error.erat_error_type]
                        : "Unknown";
                if (evt->u.erat_error.effective_address_provided)
                        ea = evt->u.erat_error.effective_address;
                break;
        case MCE_ERROR_TYPE_TLB:
                err_type = "TLB";
                subtype = evt->u.tlb_error.tlb_error_type <
                        ARRAY_SIZE(mc_tlb_types) ?
                        mc_tlb_types[evt->u.tlb_error.tlb_error_type]
                        : "Unknown";
                if (evt->u.tlb_error.effective_address_provided)
                        ea = evt->u.tlb_error.effective_address;
                break;
        case MCE_ERROR_TYPE_USER:
                err_type = "User";
                subtype = evt->u.user_error.user_error_type <
                        ARRAY_SIZE(mc_user_types) ?
                        mc_user_types[evt->u.user_error.user_error_type]
                        : "Unknown";
                if (evt->u.user_error.effective_address_provided)
                        ea = evt->u.user_error.effective_address;
                break;
        case MCE_ERROR_TYPE_RA:
                err_type = "Real address";
                subtype = evt->u.ra_error.ra_error_type <
                        ARRAY_SIZE(mc_ra_types) ?
                        mc_ra_types[evt->u.ra_error.ra_error_type]
                        : "Unknown";
                if (evt->u.ra_error.effective_address_provided)
                        ea = evt->u.ra_error.effective_address;
                break;
        case MCE_ERROR_TYPE_LINK:
                err_type = "Link";
                subtype = evt->u.link_error.link_error_type <
                        ARRAY_SIZE(mc_link_types) ?
                        mc_link_types[evt->u.link_error.link_error_type]
                        : "Unknown";
                if (evt->u.link_error.effective_address_provided)
                        ea = evt->u.link_error.effective_address;
                break;
        case MCE_ERROR_TYPE_DCACHE:
                err_type = "D-Cache";
                subtype = "Unknown";
                break;
        case MCE_ERROR_TYPE_ICACHE:
                err_type = "I-Cache";
                subtype = "Unknown";
                break;
        case MCE_ERROR_TYPE_UNKNOWN:
        default:
                err_type = "Unknown";
                subtype = "";
                break;
        }

        dar_str[0] = pa_str[0] = '\0';
        if (ea && evt->srr0 != ea) {
                /* Load/Store address */
                n = sprintf(dar_str, "DAR: %016llx ", ea);
                if (pa)
                        sprintf(dar_str + n, "paddr: %016llx ", pa);
        } else if (pa) {
                sprintf(pa_str, " paddr: %016llx", pa);
        }

        printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
               level, evt->cpu, sevstr, in_guest ? "Guest" : "Host",
               err_type, subtype, dar_str,
               evt->disposition == MCE_DISPOSITION_RECOVERED ?
               "Recovered" : "Not recovered");

        if (in_guest || user_mode) {
                printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
                       level, evt->cpu, current->pid, current->comm,
                       in_guest ? "Guest " : "", evt->srr0, pa_str);
        } else {
                printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
                       level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
        }

        printk("%sMCE: CPU%d: Initiator %s\n", level, evt->cpu, initiator);

        subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
                mc_error_class[evt->error_class] : "Unknown";
        printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);

#ifdef CONFIG_PPC_BOOK3S_64
        /* Display faulty slb contents for SLB errors. */
        if (evt->error_type == MCE_ERROR_TYPE_SLB)
                slb_dump_contents(local_paca->mce_faulty_slbs);
#endif
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);

/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain SRR0 and SRR1 respectively.
 */
long notrace machine_check_early(struct pt_regs *regs)
{
        long handled = 0;
        u8 ftrace_enabled = this_cpu_get_ftrace_enabled();

        this_cpu_set_ftrace_enabled(0);
        /* Do not use nmi_enter/exit for pseries hpte guest */
        if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
                nmi_enter();

        hv_nmi_check_nonrecoverable(regs);

        /*
         * See if platform is capable of handling machine check.
         */
        if (ppc_md.machine_check_early)
                handled = ppc_md.machine_check_early(regs);

        if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
                nmi_exit();

        this_cpu_set_ftrace_enabled(ftrace_enabled);

        return handled;
}

/* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
        DTRIG_UNKNOWN,
        DTRIG_VECTOR_CI,        /* need to emulate vector CI load instr */
        DTRIG_SUSPEND_ESCAPE,   /* need to escape from TM suspend mode */
} hmer_debug_trig_function;

static int init_debug_trig_function(void)
{
        int pvr;
        struct device_node *cpun;
        struct property *prop = NULL;
        const char *str;

        /* First look in the device tree */
        preempt_disable();
        cpun = of_get_cpu_node(smp_processor_id(), NULL);
        if (cpun) {
                of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
                                            prop, str) {
                        if (strcmp(str, "bit17-vector-ci-load") == 0)
                                hmer_debug_trig_function = DTRIG_VECTOR_CI;
                        else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
                                hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
                }
                of_node_put(cpun);
        }
        preempt_enable();

        /* If we found the property, don't look at PVR */
        if (prop)
                goto out;

        pvr = mfspr(SPRN_PVR);
        /* Check for POWER9 Nimbus (scale-out) */
        if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
                /* DD2.2 and later */
                if ((pvr & 0xfff) >= 0x202)
                        hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
                /* DD2.0 and DD2.1 - used for vector CI load emulation */
                else if ((pvr & 0xfff) >= 0x200)
                        hmer_debug_trig_function = DTRIG_VECTOR_CI;
        }

out:
        switch (hmer_debug_trig_function) {
        case DTRIG_VECTOR_CI:
                pr_debug("HMI debug trigger used for vector CI load\n");
                break;
        case DTRIG_SUSPEND_ESCAPE:
                pr_debug("HMI debug trigger used for TM suspend escape\n");
                break;
        default:
                break;
        }
        return 0;
}
__initcall(init_debug_trig_function);

/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not an HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
        unsigned long hmer = mfspr(SPRN_HMER);
        long ret = 0;

        /* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
        if (!((hmer & HMER_DEBUG_TRIG)
              && hmer_debug_trig_function != DTRIG_UNKNOWN))
                return -1;

        hmer &= ~HMER_DEBUG_TRIG;
        /* HMER is a write-AND register */
        mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);

        switch (hmer_debug_trig_function) {
        case DTRIG_VECTOR_CI:
                /*
                 * Now to avoid problems with soft-disable we
                 * only do the emulation if we are coming from
                 * host user space
                 */
                if (regs && user_mode(regs))
                        ret = local_paca->hmi_p9_special_emu = 1;
                break;
        default:
                break;
        }

        /*
         * See if any other HMI causes remain to be handled
         */
        if (hmer & mfspr(SPRN_HMEER))
                return -1;

        return ret;
}

/*
 * Return values:
 * 0 means no further handling is required
 * 1 means further handling is required
 */
long hmi_exception_realmode(struct pt_regs *regs)
{
        int ret;

        local_paca->hmi_irqs++;

        ret = hmi_handle_debugtrig(regs);
        if (ret >= 0)
                return ret;

        wait_for_subcore_guest_exit();

        if (ppc_md.hmi_exception_early)
                ppc_md.hmi_exception_early(regs);

        wait_for_tb_resync();

        return 1;
}