eeh_driver.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
 * Copyright IBM Corp. 2004 2005
 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
 *
 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/rtas.h>

struct eeh_rmv_data {
	struct list_head removed_vf_list;
	int removed_dev_count;
};

static int eeh_result_priority(enum pci_ers_result result)
{
	switch (result) {
	case PCI_ERS_RESULT_NONE:
		return 1;
	case PCI_ERS_RESULT_NO_AER_DRIVER:
		return 2;
	case PCI_ERS_RESULT_RECOVERED:
		return 3;
	case PCI_ERS_RESULT_CAN_RECOVER:
		return 4;
	case PCI_ERS_RESULT_DISCONNECT:
		return 5;
	case PCI_ERS_RESULT_NEED_RESET:
		return 6;
	default:
		WARN_ONCE(1, "Unknown pci_ers_result value: %d\n", (int)result);
		return 0;
	}
};

static const char *pci_ers_result_name(enum pci_ers_result result)
{
	switch (result) {
	case PCI_ERS_RESULT_NONE:
		return "none";
	case PCI_ERS_RESULT_CAN_RECOVER:
		return "can recover";
	case PCI_ERS_RESULT_NEED_RESET:
		return "need reset";
	case PCI_ERS_RESULT_DISCONNECT:
		return "disconnect";
	case PCI_ERS_RESULT_RECOVERED:
		return "recovered";
	case PCI_ERS_RESULT_NO_AER_DRIVER:
		return "no AER driver";
	default:
		WARN_ONCE(1, "Unknown result type: %d\n", (int)result);
		return "unknown";
	}
};
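
/*
 * Combine two recovery results, keeping whichever is more severe
 * according to the ranking in eeh_result_priority().
 */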
static enum pci_ers_result pci_ers_merge_result(enum pci_ers_result old,
						enum pci_ers_result new)
{
	if (eeh_result_priority(new) > eeh_result_priority(old))
		return new;
	return old;
}

static bool eeh_dev_removed(struct eeh_dev *edev)
{
	return !edev || (edev->mode & EEH_DEV_REMOVED);
}
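
/*
 * A device is "actionable" when it is still bound to a pci_dev, its
 * channel has not permanently failed, it has not been removed, and its
 * PE has not been passed through to a guest.
 */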
static bool eeh_edev_actionable(struct eeh_dev *edev)
{
	if (!edev->pdev)
		return false;
	if (edev->pdev->error_state == pci_channel_io_perm_failure)
		return false;
	if (eeh_dev_removed(edev))
		return false;
	if (eeh_pe_passed(edev->pe))
		return false;

	return true;
}

/**
 * eeh_pcid_get - Get the PCI device driver
 * @pdev: PCI device
 *
 * The function is used to retrieve the PCI device driver for
 * the indicated PCI device. Besides, we will increase the reference
 * of the PCI device driver to prevent it being unloaded on the fly.
 * Otherwise, a kernel crash would be seen.
 */
static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return NULL;

	if (!try_module_get(pdev->driver->driver.owner))
		return NULL;

	return pdev->driver;
}

/**
 * eeh_pcid_put - Drop the reference to the PCI device driver
 * @pdev: PCI device
 *
 * The function drops the reference to the PCI device driver
 * that was taken by eeh_pcid_get().
 */
static inline void eeh_pcid_put(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return;

	module_put(pdev->driver->driver.owner);
}

/**
 * eeh_disable_irq - Disable interrupt for the recovering device
 * @edev: EEH device
 *
 * This routine must be called when reporting temporary or permanent
 * error to the particular PCI device to disable interrupt of that
 * device. If the device has enabled MSI or MSI-X interrupt, we needn't
 * do real work because EEH should freeze DMA transfers for those PCI
 * devices encountering EEH errors, which includes MSI or MSI-X.
 */
static void eeh_disable_irq(struct eeh_dev *edev)
{
	/* Don't disable MSI and MSI-X interrupts. They are
	 * effectively disabled by the DMA Stopped state
	 * when an EEH error occurs.
	 */
	if (edev->pdev->msi_enabled || edev->pdev->msix_enabled)
		return;

	if (!irq_has_action(edev->pdev->irq))
		return;

	edev->mode |= EEH_DEV_IRQ_DISABLED;
	disable_irq_nosync(edev->pdev->irq);
}

/**
 * eeh_enable_irq - Enable interrupt for the recovering device
 * @edev: EEH device
 *
 * This routine must be called to enable interrupt while failed
 * device could be resumed.
 */
static void eeh_enable_irq(struct eeh_dev *edev)
{
	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
		/*
		 * FIXME !!!!!
		 *
		 * This is just ass backwards. This maze has
		 * unbalanced irq_enable/disable calls. So instead of
		 * finding the root cause it works around the warning
		 * in the irq_enable code by conditionally calling
		 * into it.
		 *
		 * That's just wrong. The warning in the core code is
		 * there to tell people to fix their asymmetries in
		 * their own code, not by abusing the core information
		 * to avoid it.
		 *
		 * I so wish that the asymmetry would be the other way
		 * round and a few more irq_disable calls render that
		 * shit unusable forever.
		 *
		 * tglx
		 */
		if (irqd_irq_disabled(irq_get_irq_data(edev->pdev->irq)))
			enable_irq(edev->pdev->irq);
	}
}
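
/* Save a device's PCI config space so it can be restored after reset. */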
static void eeh_dev_save_state(struct eeh_dev *edev, void *userdata)
{
	struct pci_dev *pdev;

	if (!edev)
		return;

	/*
	 * We cannot access the config space on some adapters.
	 * Otherwise, it will cause fenced PHB. We don't save
	 * the content in their config space and will restore
	 * from the initial config space saved when the EEH
	 * device is created.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
		return;

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return;

	pci_save_state(pdev);
}
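
/* Propagate a PCI channel state to every actionable device under @root. */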
static void eeh_set_channel_state(struct eeh_pe *root, pci_channel_state_t s)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;

	eeh_for_each_pe(root, pe)
		eeh_pe_for_each_dev(pe, edev, tmp)
			if (eeh_edev_actionable(edev))
				edev->pdev->error_state = s;
}

static void eeh_set_irq_state(struct eeh_pe *root, bool enable)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;

	eeh_for_each_pe(root, pe) {
		eeh_pe_for_each_dev(pe, edev, tmp) {
			if (!eeh_edev_actionable(edev))
				continue;

			if (!eeh_pcid_get(edev->pdev))
				continue;

			if (enable)
				eeh_enable_irq(edev);
			else
				eeh_disable_irq(edev);

			eeh_pcid_put(edev->pdev);
		}
	}
}
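
/*
 * A "report" function is invoked for each device in a PE tree to call one
 * of the driver's pci_error_handlers callbacks; the per-device results are
 * merged into a single aggregate pci_ers_result.
 */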
typedef enum pci_ers_result (*eeh_report_fn)(struct eeh_dev *,
					     struct pci_dev *,
					     struct pci_driver *);
static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn,
			       enum pci_ers_result *result)
{
	struct pci_dev *pdev;
	struct pci_driver *driver;
	enum pci_ers_result new_result;

	pci_lock_rescan_remove();
	pdev = edev->pdev;
	if (pdev)
		get_device(&pdev->dev);
	pci_unlock_rescan_remove();
	if (!pdev) {
		eeh_edev_info(edev, "no device");
		return;
	}
	device_lock(&pdev->dev);
	if (eeh_edev_actionable(edev)) {
		driver = eeh_pcid_get(pdev);

		if (!driver)
			eeh_edev_info(edev, "no driver");
		else if (!driver->err_handler)
			eeh_edev_info(edev, "driver not EEH aware");
		else if (edev->mode & EEH_DEV_NO_HANDLER)
			eeh_edev_info(edev, "driver bound too late");
		else {
			new_result = fn(edev, pdev, driver);
			eeh_edev_info(edev, "%s driver reports: '%s'",
				      driver->name,
				      pci_ers_result_name(new_result));
			if (result)
				*result = pci_ers_merge_result(*result,
							       new_result);
		}
		if (driver)
			eeh_pcid_put(pdev);
	} else {
		eeh_edev_info(edev, "not actionable (%d,%d,%d)", !!pdev,
			      !eeh_dev_removed(edev), !eeh_pe_passed(edev->pe));
	}
	device_unlock(&pdev->dev);
	if (edev->pdev != pdev)
		eeh_edev_warn(edev, "Device changed during processing!\n");
	put_device(&pdev->dev);
}

static void eeh_pe_report(const char *name, struct eeh_pe *root,
			  eeh_report_fn fn, enum pci_ers_result *result)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;

	pr_info("EEH: Beginning: '%s'\n", name);
	eeh_for_each_pe(root, pe) eeh_pe_for_each_dev(pe, edev, tmp)
		eeh_pe_report_edev(edev, fn, result);
	if (result)
		pr_info("EEH: Finished:'%s' with aggregate recovery state:'%s'\n",
			name, pci_ers_result_name(*result));
	else
		pr_info("EEH: Finished:'%s'\n", name);
}

/**
 * eeh_report_error - Report pci error to each device driver
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * Report an EEH error to each device driver.
 */
static enum pci_ers_result eeh_report_error(struct eeh_dev *edev,
					    struct pci_dev *pdev,
					    struct pci_driver *driver)
{
	enum pci_ers_result rc;

	if (!driver->err_handler->error_detected)
		return PCI_ERS_RESULT_NONE;

	eeh_edev_info(edev, "Invoking %s->error_detected(IO frozen)",
		      driver->name);
	rc = driver->err_handler->error_detected(pdev, pci_channel_io_frozen);

	edev->in_error = true;
	pci_uevent_ers(pdev, PCI_ERS_RESULT_NONE);
	return rc;
}

/**
 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * Tells each device driver that IO ports, MMIO and config space I/O
 * are now enabled.
 */
static enum pci_ers_result eeh_report_mmio_enabled(struct eeh_dev *edev,
						   struct pci_dev *pdev,
						   struct pci_driver *driver)
{
	if (!driver->err_handler->mmio_enabled)
		return PCI_ERS_RESULT_NONE;
	eeh_edev_info(edev, "Invoking %s->mmio_enabled()", driver->name);
	return driver->err_handler->mmio_enabled(pdev);
}

/**
 * eeh_report_reset - Tell device that slot has been reset
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * This routine must be called while EEH tries to reset particular
 * PCI device so that the associated PCI device driver could take
 * some actions, usually to save data the driver needs so that the
 * driver can work again while the device is recovered.
 */
static enum pci_ers_result eeh_report_reset(struct eeh_dev *edev,
					    struct pci_dev *pdev,
					    struct pci_driver *driver)
{
	if (!driver->err_handler->slot_reset || !edev->in_error)
		return PCI_ERS_RESULT_NONE;
	eeh_edev_info(edev, "Invoking %s->slot_reset()", driver->name);
	return driver->err_handler->slot_reset(pdev);
}
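
/* Restore the PCI config space saved earlier by eeh_dev_save_state(). */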
static void eeh_dev_restore_state(struct eeh_dev *edev, void *userdata)
{
	struct pci_dev *pdev;

	if (!edev)
		return;

	/*
	 * The content in the config space isn't saved because the
	 * config space is blocked on some adapters. We have to
	 * restore from the initial config space saved when the
	 * EEH device is created.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
		if (list_is_last(&edev->entry, &edev->pe->edevs))
			eeh_pe_restore_bars(edev->pe);

		return;
	}

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return;

	pci_restore_state(pdev);
}

/**
 * eeh_report_resume - Tell device to resume normal operations
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * This routine must be called to notify the device driver that it
 * could resume so that the device driver can do some initialization
 * to make the recovered device work again.
 */
static enum pci_ers_result eeh_report_resume(struct eeh_dev *edev,
					     struct pci_dev *pdev,
					     struct pci_driver *driver)
{
	if (!driver->err_handler->resume || !edev->in_error)
		return PCI_ERS_RESULT_NONE;

	eeh_edev_info(edev, "Invoking %s->resume()", driver->name);
	driver->err_handler->resume(pdev);

	pci_uevent_ers(edev->pdev, PCI_ERS_RESULT_RECOVERED);
#ifdef CONFIG_PCI_IOV
	if (eeh_ops->notify_resume)
		eeh_ops->notify_resume(edev);
#endif
	return PCI_ERS_RESULT_NONE;
}

/**
 * eeh_report_failure - Tell device driver that device is dead.
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * This informs the device driver that the device is permanently
 * dead, and that no further recovery attempts will be made on it.
 */
static enum pci_ers_result eeh_report_failure(struct eeh_dev *edev,
					      struct pci_dev *pdev,
					      struct pci_driver *driver)
{
	enum pci_ers_result rc;

	if (!driver->err_handler->error_detected)
		return PCI_ERS_RESULT_NONE;

	eeh_edev_info(edev, "Invoking %s->error_detected(permanent failure)",
		      driver->name);
	rc = driver->err_handler->error_detected(pdev,
						 pci_channel_io_perm_failure);

	pci_uevent_ers(pdev, PCI_ERS_RESULT_DISCONNECT);
	return rc;
}

static void *eeh_add_virt_device(struct eeh_dev *edev)
{
	struct pci_driver *driver;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);

	if (!(edev->physfn)) {
		eeh_edev_warn(edev, "Not for VF\n");
		return NULL;
	}

	driver = eeh_pcid_get(dev);
	if (driver) {
		if (driver->err_handler) {
			eeh_pcid_put(dev);
			return NULL;
		}
		eeh_pcid_put(dev);
	}

#ifdef CONFIG_PCI_IOV
	pci_iov_add_virtfn(edev->physfn, edev->vf_index);
#endif
	return NULL;
}
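
/*
 * Remove a device that cannot be recovered in place, either because it
 * has no EEH-aware driver or because the caller forces removal. Removed
 * VFs are recorded in @userdata so they can be re-added after the PF
 * has recovered.
 */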
static void eeh_rmv_device(struct eeh_dev *edev, void *userdata)
{
	struct pci_driver *driver;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata;

	/*
	 * Actually, we should remove the PCI bridges as well.
	 * However, that adds a lot of complexity, particularly
	 * since some of the devices under the bridge might
	 * support EEH. So we just care about PCI devices for
	 * simplicity here.
	 */
	if (!eeh_edev_actionable(edev) ||
	    (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE))
		return;

	if (rmv_data) {
		driver = eeh_pcid_get(dev);
		if (driver) {
			if (driver->err_handler &&
			    driver->err_handler->error_detected &&
			    driver->err_handler->slot_reset) {
				eeh_pcid_put(dev);
				return;
			}
			eeh_pcid_put(dev);
		}
	}

	/* Remove it from PCI subsystem */
	pr_info("EEH: Removing %s without EEH sensitive driver\n",
		pci_name(dev));
	edev->mode |= EEH_DEV_DISCONNECTED;
	if (rmv_data)
		rmv_data->removed_dev_count++;

	if (edev->physfn) {
#ifdef CONFIG_PCI_IOV
		pci_iov_remove_virtfn(edev->physfn, edev->vf_index);
		edev->pdev = NULL;
#endif
		if (rmv_data)
			list_add(&edev->rmv_entry, &rmv_data->removed_vf_list);
	} else {
		pci_lock_rescan_remove();
		pci_stop_and_remove_bus_device(dev);
		pci_unlock_rescan_remove();
	}
}
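
/*
 * Detach devices that were flagged EEH_DEV_DISCONNECTED from their PE so
 * the PE/device binding can be rebuilt when the devices are re-added.
 */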
static void *eeh_pe_detach_dev(struct eeh_pe *pe, void *userdata)
{
	struct eeh_dev *edev, *tmp;

	eeh_pe_for_each_dev(pe, edev, tmp) {
		if (!(edev->mode & EEH_DEV_DISCONNECTED))
			continue;

		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
		eeh_pe_tree_remove(edev);
	}

	return NULL;
}

/*
 * Explicitly clear PE's frozen state for PowerNV where
 * we have frozen PE until BAR restore is completed. It's
 * harmless to clear it for pSeries. To be consistent with
 * PE reset (for 3 times), we try to clear the frozen state
 * for 3 times as well.
 */
static int eeh_clear_pe_frozen_state(struct eeh_pe *root, bool include_passed)
{
	struct eeh_pe *pe;
	int i;

	eeh_for_each_pe(root, pe) {
		if (include_passed || !eeh_pe_passed(pe)) {
			for (i = 0; i < 3; i++)
				if (!eeh_unfreeze_pe(pe))
					break;
			if (i >= 3)
				return -EIO;
		}
	}
	eeh_pe_state_clear(root, EEH_PE_ISOLATED, include_passed);
	return 0;
}
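
/*
 * Perform a full reset-and-recovery cycle on a PE that is not already
 * being recovered: save device state, reset the PE, thaw it, restore
 * device state, then leave recovery mode.
 */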
int eeh_pe_reset_and_recover(struct eeh_pe *pe)
{
	int ret;

	/* Bail if the PE is being recovered */
	if (pe->state & EEH_PE_RECOVERING)
		return 0;

	/* Put the PE into recovery mode */
	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	/* Save states */
	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);

	/* Issue reset */
	ret = eeh_pe_reset_full(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
		return ret;
	}

	/* Unfreeze the PE */
	ret = eeh_clear_pe_frozen_state(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
		return ret;
	}

	/* Restore device state */
	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);

	/* Clear recovery mode */
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);

	return 0;
}

/**
 * eeh_reset_device - Perform actual reset of a pci slot
 * @driver_eeh_aware: Does the device's driver provide EEH support?
 * @pe: EEH PE
 * @bus: PCI bus corresponding to the isolated slot
 * @rmv_data: Optional, list to record removed devices
 *
 * This routine must be called to do reset on the indicated PE.
 * During the reset, udev might be invoked because those affected
 * PCI devices will be removed and then added.
 */
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
			    struct eeh_rmv_data *rmv_data,
			    bool driver_eeh_aware)
{
	time64_t tstamp;
	int cnt, rc;
	struct eeh_dev *edev;
	struct eeh_pe *tmp_pe;
	bool any_passed = false;

	eeh_for_each_pe(pe, tmp_pe)
		any_passed |= eeh_pe_passed(tmp_pe);

	/* pcibios will clear the counter; save the value */
	cnt = pe->freeze_count;
	tstamp = pe->tstamp;

	/*
	 * We don't remove the corresponding PE instances because
	 * we need the information afterwards. The attached EEH
	 * devices are expected to be attached soon when calling
	 * into pci_hp_add_devices().
	 */
	eeh_pe_state_mark(pe, EEH_PE_KEEP);
	if (any_passed || driver_eeh_aware || (pe->type & EEH_PE_VF)) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
	} else {
		pci_lock_rescan_remove();
		pci_hp_remove_devices(bus);
		pci_unlock_rescan_remove();
	}

	/*
	 * Reset the pci controller. (Asserts RST#; resets config space).
	 * Reconfigure bridges and devices. Don't try to bring the system
	 * up if the reset failed for some reason.
	 *
	 * During the reset, it's very dangerous to have uncontrolled PCI
	 * config accesses. So we prefer to block them. However, controlled
	 * PCI config accesses initiated from EEH itself are allowed.
	 */
	rc = eeh_pe_reset_full(pe, false);
	if (rc)
		return rc;

	pci_lock_rescan_remove();

	/* Restore PE */
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);

	/* Clear frozen state */
	rc = eeh_clear_pe_frozen_state(pe, false);
	if (rc) {
		pci_unlock_rescan_remove();
		return rc;
	}

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
	 * this is a hack, but if we don't do this, and try to bring
	 * the device up before the scripts have taken it down,
	 * potentially weird things happen.
	 */
	if (!driver_eeh_aware || rmv_data->removed_dev_count) {
		pr_info("EEH: Sleep 5s ahead of %s hotplug\n",
			(driver_eeh_aware ? "partial" : "complete"));
		ssleep(5);

		/*
		 * The EEH device is still connected with its parent
		 * PE. We should disconnect it so the binding can be
		 * rebuilt when adding PCI devices.
		 */
		edev = list_first_entry(&pe->edevs, struct eeh_dev, entry);
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		if (pe->type & EEH_PE_VF) {
			eeh_add_virt_device(edev);
		} else {
			if (!driver_eeh_aware)
				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
			pci_hp_add_devices(bus);
		}
	}
	eeh_pe_state_clear(pe, EEH_PE_KEEP, true);

	pe->tstamp = tstamp;
	pe->freeze_count = cnt;

	pci_unlock_rescan_remove();
	return 0;
}

/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 300

/* Walks the PE tree after processing an event to remove any stale PEs.
 *
 * NB: This needs to be recursive to ensure the leaf PEs get removed
 * before their parents do. Although this is possible to do iteratively
 * we don't since this is easier to read and we need to guarantee
 * the leaf nodes will be handled first.
 */
static void eeh_pe_cleanup(struct eeh_pe *pe)
{
	struct eeh_pe *child_pe, *tmp;

	list_for_each_entry_safe(child_pe, tmp, &pe->child_list, child)
		eeh_pe_cleanup(child_pe);

	if (pe->state & EEH_PE_KEEP)
		return;

	if (!(pe->state & EEH_PE_INVALID))
		return;

	if (list_empty(&pe->edevs) && list_empty(&pe->child_list)) {
		list_del(&pe->child);
		kfree(pe);
	}
}

/**
 * eeh_slot_presence_check - Check if a device is still present in a slot
 * @pdev: pci_dev to check
 *
 * This function may return a false positive if we can't determine the slot's
 * presence state. This might happen for PCIe slots if the PE containing
 * the upstream bridge is also frozen, or the bridge is part of the same PE
 * as the device.
 *
 * This shouldn't happen often, but you might see it if you hotplug a PCIe
 * switch.
 */
static bool eeh_slot_presence_check(struct pci_dev *pdev)
{
	const struct hotplug_slot_ops *ops;
	struct pci_slot *slot;
	u8 state;
	int rc;

	if (!pdev)
		return false;

	if (pdev->error_state == pci_channel_io_perm_failure)
		return false;

	slot = pdev->slot;
	if (!slot || !slot->hotplug)
		return true;

	ops = slot->hotplug->ops;
	if (!ops || !ops->get_adapter_status)
		return true;

	/* set the attention indicator while we've got the slot ops */
	if (ops->set_attention_status)
		ops->set_attention_status(slot->hotplug, 1);

	rc = ops->get_adapter_status(slot->hotplug, &state);
	if (rc)
		return true;

	return !!state;
}

static void eeh_clear_slot_attention(struct pci_dev *pdev)
{
	const struct hotplug_slot_ops *ops;
	struct pci_slot *slot;

	if (!pdev)
		return;

	if (pdev->error_state == pci_channel_io_perm_failure)
		return;

	slot = pdev->slot;
	if (!slot || !slot->hotplug)
		return;

	ops = slot->hotplug->ops;
	if (!ops || !ops->set_attention_status)
		return;

	ops->set_attention_status(slot->hotplug, 0);
}

/**
 * eeh_handle_normal_event - Handle EEH events on a specific PE
 * @pe: EEH PE - which should not be used after we return, as it may
 * have been invalidated.
 *
 * Attempts to recover the given PE. If recovery fails or the PE has failed
 * too many times, remove the PE.
 *
 * When the PHB detects address or data parity errors on a particular PCI
 * slot, the associated PE will be frozen. Besides, DMAs occurring
 * to wild addresses (which usually happen due to bugs in device
 * drivers or in PCI adapter firmware) can cause EEH errors. #SERR,
 * #PERR or other misc PCI-related errors also can trigger EEH errors.
 *
 * Recovery process consists of unplugging the device driver (which
 * generated hotplug events to userspace), then issuing a PCI #RST to
 * the device, then reconfiguring the PCI config space for all bridges
 * & devices under this slot, and then finally restarting the device
 * drivers (which cause a second set of hotplug events to go out to
 * userspace).
 */
void eeh_handle_normal_event(struct eeh_pe *pe)
{
	struct pci_bus *bus;
	struct eeh_dev *edev, *tmp;
	struct eeh_pe *tmp_pe;
	int rc = 0;
	enum pci_ers_result result = PCI_ERS_RESULT_NONE;
	struct eeh_rmv_data rmv_data =
		{LIST_HEAD_INIT(rmv_data.removed_vf_list), 0};
	int devices = 0;

	bus = eeh_pe_bus_get(pe);
	if (!bus) {
		pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
			__func__, pe->phb->global_number, pe->addr);
		return;
	}

	/*
	 * When devices are hot-removed we might get an EEH due to
	 * a driver attempting to touch the MMIO space of a removed
	 * device. In this case we don't have a device to recover
	 * so suppress the event if we can't find any present devices.
	 *
	 * The hotplug driver should take care of tearing down the
	 * device itself.
	 */
	eeh_for_each_pe(pe, tmp_pe)
		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
			if (eeh_slot_presence_check(edev->pdev))
				devices++;

	if (!devices) {
		pr_debug("EEH: Frozen PHB#%x-PE#%x is empty!\n",
			pe->phb->global_number, pe->addr);
		goto out; /* nothing to recover */
	}

	/* Log the event */
	if (pe->type & EEH_PE_PHB) {
		pr_err("EEH: Recovering PHB#%x, location: %s\n",
			pe->phb->global_number, eeh_pe_loc_get(pe));
	} else {
		struct eeh_pe *phb_pe = eeh_phb_pe_get(pe->phb);

		pr_err("EEH: Recovering PHB#%x-PE#%x\n",
		       pe->phb->global_number, pe->addr);
		pr_err("EEH: PE location: %s, PHB location: %s\n",
		       eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe));
	}

#ifdef CONFIG_STACKTRACE
	/*
	 * Print the saved stack trace now that we've verified there's
	 * something to recover.
	 */
	if (pe->trace_entries) {
		void **ptrs = (void **) pe->stack_trace;
		int i;

		pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
		       pe->phb->global_number, pe->addr);

		/* FIXME: Use the same format as dump_stack() */
		pr_err("EEH: Call Trace:\n");
		for (i = 0; i < pe->trace_entries; i++)
			pr_err("EEH: [%pK] %pS\n", ptrs[i], ptrs[i]);

		pe->trace_entries = 0;
	}
#endif /* CONFIG_STACKTRACE */

	eeh_pe_update_time_stamp(pe);
	pe->freeze_count++;
	if (pe->freeze_count > eeh_max_freezes) {
		pr_err("EEH: PHB#%x-PE#%x has failed %d times in the last hour and has been permanently disabled.\n",
		       pe->phb->global_number, pe->addr,
		       pe->freeze_count);

		result = PCI_ERS_RESULT_DISCONNECT;
	}

	eeh_for_each_pe(pe, tmp_pe)
		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
			edev->mode &= ~EEH_DEV_NO_HANDLER;

	/* Walk the various device drivers attached to this slot through
	 * a reset sequence, giving each an opportunity to do what it needs
	 * to accomplish the reset. Each child gets a report of the
	 * status ... if any child can't handle the reset, then the entire
	 * slot is dlpar removed and added.
	 *
	 * When the PHB is fenced, we have to issue a reset to recover from
	 * the error. Override the result if necessary to have partial
	 * hotplug for this case.
	 */
	if (result != PCI_ERS_RESULT_DISCONNECT) {
		pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
			pe->freeze_count, eeh_max_freezes);
		pr_info("EEH: Notify device drivers to shutdown\n");
		eeh_set_channel_state(pe, pci_channel_io_frozen);
		eeh_set_irq_state(pe, false);
		eeh_pe_report("error_detected(IO frozen)", pe,
			      eeh_report_error, &result);
		if ((pe->type & EEH_PE_PHB) &&
		    result != PCI_ERS_RESULT_NONE &&
		    result != PCI_ERS_RESULT_NEED_RESET)
			result = PCI_ERS_RESULT_NEED_RESET;
	}

	/* Get the current PCI slot state. This can take a long time,
	 * sometimes over 300 seconds for certain systems.
	 */
	if (result != PCI_ERS_RESULT_DISCONNECT) {
		rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY * 1000);
		if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
			pr_warn("EEH: Permanent failure\n");
			result = PCI_ERS_RESULT_DISCONNECT;
		}
	}

	/* Since rtas may enable MMIO when posting the error log,
	 * don't post the error log until after all dev drivers
	 * have been informed.
	 */
	if (result != PCI_ERS_RESULT_DISCONNECT) {
		pr_info("EEH: Collect temporary log\n");
		eeh_slot_error_detail(pe, EEH_LOG_TEMP);
	}

	/* If all device drivers were EEH-unaware, then shut
	 * down all of the device drivers, and hope they
	 * go down willingly, without panicking the system.
	 */
	if (result == PCI_ERS_RESULT_NONE) {
		pr_info("EEH: Reset with hotplug activity\n");
		rc = eeh_reset_device(pe, bus, NULL, false);
		if (rc) {
			pr_warn("%s: Unable to reset, err=%d\n",
				__func__, rc);
			result = PCI_ERS_RESULT_DISCONNECT;
		}
	}

	/* If all devices reported they can proceed, then re-enable MMIO */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable I/O for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);

		if (rc < 0) {
			result = PCI_ERS_RESULT_DISCONNECT;
		} else if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			pr_info("EEH: Notify device drivers to resume I/O\n");
			eeh_pe_report("mmio_enabled", pe,
				      eeh_report_mmio_enabled, &result);
		}
	}

	/* If all devices reported they can proceed, then re-enable DMA */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enabled DMA for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);

		if (rc < 0) {
			result = PCI_ERS_RESULT_DISCONNECT;
		} else if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			/*
			 * We didn't do PE reset for the case. The PE
			 * is still in frozen state. Clear it before
			 * resuming the PE.
			 */
			eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
			result = PCI_ERS_RESULT_RECOVERED;
		}
	}

	/* If any device called out for a reset, then reset the slot */
	if (result == PCI_ERS_RESULT_NEED_RESET) {
		pr_info("EEH: Reset without hotplug activity\n");
		rc = eeh_reset_device(pe, bus, &rmv_data, true);
		if (rc) {
			pr_warn("%s: Cannot reset, err=%d\n",
				__func__, rc);
			result = PCI_ERS_RESULT_DISCONNECT;
		} else {
			result = PCI_ERS_RESULT_NONE;
			eeh_set_channel_state(pe, pci_channel_io_normal);
			eeh_set_irq_state(pe, true);
			eeh_pe_report("slot_reset", pe, eeh_report_reset,
				      &result);
		}
	}

	if ((result == PCI_ERS_RESULT_RECOVERED) ||
	    (result == PCI_ERS_RESULT_NONE)) {
		/*
		 * For hot-removed VFs, we should add them back after
		 * the PF gets recovered properly.
		 */
		list_for_each_entry_safe(edev, tmp, &rmv_data.removed_vf_list,
					 rmv_entry) {
			eeh_add_virt_device(edev);
			list_del(&edev->rmv_entry);
		}

		/* Tell all device drivers that they can resume operations */
		pr_info("EEH: Notify device driver to resume\n");
		eeh_set_channel_state(pe, pci_channel_io_normal);
		eeh_set_irq_state(pe, true);
		eeh_pe_report("resume", pe, eeh_report_resume, NULL);
		eeh_for_each_pe(pe, tmp_pe) {
			eeh_pe_for_each_dev(tmp_pe, edev, tmp) {
				edev->mode &= ~EEH_DEV_NO_HANDLER;
				edev->in_error = false;
			}
		}

		pr_info("EEH: Recovery successful.\n");
	} else {
		/*
		 * About 90% of all real-life EEH failures in the field
		 * are due to poorly seated PCI cards. Only 10% or so are
		 * due to actual, failed cards.
		 */
		pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
		       "Please try reseating or replacing it\n",
			pe->phb->global_number, pe->addr);

		eeh_slot_error_detail(pe, EEH_LOG_PERM);

		/* Notify all devices that they're about to go down. */
		eeh_set_channel_state(pe, pci_channel_io_perm_failure);
		eeh_set_irq_state(pe, false);
		eeh_pe_report("error_detected(permanent failure)", pe,
			      eeh_report_failure, NULL);

		/* Mark the PE to be removed permanently */
		eeh_pe_state_mark(pe, EEH_PE_REMOVED);

		/*
		 * Shut down the device drivers for good. We mark
		 * all removed devices correctly so that their PCI
		 * config space is not accessed any more.
		 */
		if (pe->type & EEH_PE_VF) {
			eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
			eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
		} else {
			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
			eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);

			pci_lock_rescan_remove();
			pci_hp_remove_devices(bus);
			pci_unlock_rescan_remove();
			/* The passed PE should no longer be used */
			return;
		}
	}

out:
	/*
	 * Clean up any PEs without devices. While marked as EEH_PE_RECOVERING
	 * we don't want to modify the PE tree structure so we do it here.
	 */
	eeh_pe_cleanup(pe);

	/* clear the slot attention LED for all recovered devices */
	eeh_for_each_pe(pe, tmp_pe)
		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
			eeh_clear_slot_attention(edev->pdev);

	eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
}

/**
 * eeh_handle_special_event - Handle EEH events without a specific failing PE
 *
 * Called when an EEH event is detected but can't be narrowed down to a
 * specific PE. Iterates through possible failures and handles them as
 * necessary.
 */
void eeh_handle_special_event(void)
{
	struct eeh_pe *pe, *phb_pe, *tmp_pe;
	struct eeh_dev *edev, *tmp_edev;
	struct pci_bus *bus;
	struct pci_controller *hose;
	unsigned long flags;
	int rc;

	do {
		rc = eeh_ops->next_error(&pe);

		switch (rc) {
		case EEH_NEXT_ERR_DEAD_IOC:
			/* Mark all PHBs in dead state */
			eeh_serialize_lock(&flags);

			/* Purge all events */
			eeh_remove_event(NULL, true);

			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe) continue;

				eeh_pe_mark_isolated(phb_pe);
			}

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_FROZEN_PE:
		case EEH_NEXT_ERR_FENCED_PHB:
		case EEH_NEXT_ERR_DEAD_PHB:
			/* Mark the PE in fenced state */
			eeh_serialize_lock(&flags);

			/* Purge all events of the PHB */
			eeh_remove_event(pe, true);

			if (rc != EEH_NEXT_ERR_DEAD_PHB)
				eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
			eeh_pe_mark_isolated(pe);

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_NONE:
			return;
		default:
			pr_warn("%s: Invalid value %d from next_error()\n",
				__func__, rc);
			return;
		}

		/*
		 * For fenced PHB and frozen PE, it's handled as normal
		 * event. We have to remove the affected PHBs for dead
		 * PHB and IOC
		 */
		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
		    rc == EEH_NEXT_ERR_FENCED_PHB) {
			eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
			eeh_handle_normal_event(pe);
		} else {
			eeh_for_each_pe(pe, tmp_pe)
				eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
					edev->mode &= ~EEH_DEV_NO_HANDLER;

			/* Notify all devices to be down */
			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
			eeh_set_channel_state(pe, pci_channel_io_perm_failure);
			eeh_pe_report(
				"error_detected(permanent failure)", pe,
				eeh_report_failure, NULL);

			pci_lock_rescan_remove();
			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe ||
				    !(phb_pe->state & EEH_PE_ISOLATED) ||
				    (phb_pe->state & EEH_PE_RECOVERING))
					continue;

				bus = eeh_pe_bus_get(phb_pe);
				if (!bus) {
					pr_err("%s: Cannot find PCI bus for "
					       "PHB#%x-PE#%x\n",
					       __func__,
					       pe->phb->global_number,
					       pe->addr);
					break;
				}
				pci_hp_remove_devices(bus);
			}
			pci_unlock_rescan_remove();
		}

		/*
		 * If we have detected dead IOC, we needn't proceed
		 * any more since all PHBs would have been removed
		 */
		if (rc == EEH_NEXT_ERR_DEAD_IOC)
			break;
	} while (rc != EEH_NEXT_ERR_NONE);
}