// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Express Downstream Port Containment services driver
 * Author: Keith Busch <keith.busch@intel.com>
 *
 * Copyright (C) 2016 Intel Corp.
 */

#define dev_fmt(fmt) "DPC: " fmt

#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/pci.h>

#include "portdrv.h"
#include "../pci.h"
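
/*
 * Strings indexed by RP PIO error bit position; NULL entries correspond to
 * reserved bits.
 */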
static const char * const rp_pio_error_string[] = {
        "Configuration Request received UR Completion",  /* Bit Position 0  */
        "Configuration Request received CA Completion",  /* Bit Position 1  */
        "Configuration Request Completion Timeout",      /* Bit Position 2  */
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        "I/O Request received UR Completion",            /* Bit Position 8  */
        "I/O Request received CA Completion",            /* Bit Position 9  */
        "I/O Request Completion Timeout",                /* Bit Position 10 */
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        "Memory Request received UR Completion",         /* Bit Position 16 */
        "Memory Request received CA Completion",         /* Bit Position 17 */
        "Memory Request Completion Timeout",             /* Bit Position 18 */
};
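
/* Save the DPC Control register into the device's saved capability state */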
void pci_save_dpc_state(struct pci_dev *dev)
{
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        if (!pci_is_pcie(dev))
                return;

        save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
        if (!save_state)
                return;

        cap = (u16 *)&save_state->cap.data[0];
        pci_read_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, cap);
}
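
/* Restore the DPC Control register saved by pci_save_dpc_state() */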
void pci_restore_dpc_state(struct pci_dev *dev)
{
        struct pci_cap_saved_state *save_state;
        u16 *cap;

        if (!pci_is_pcie(dev))
                return;

        save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
        if (!save_state)
                return;

        cap = (u16 *)&save_state->cap.data[0];
        pci_write_config_word(dev, dev->dpc_cap + PCI_EXP_DPC_CTL, *cap);
}

static DECLARE_WAIT_QUEUE_HEAD(dpc_completed_waitqueue);

#ifdef CONFIG_HOTPLUG_PCI_PCIE
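/*
 * A DPC event is over once DPC Trigger Status is clear (or the device is no
 * longer reachable) and no recovery is in progress for the port.
 */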
static bool dpc_completed(struct pci_dev *pdev)
{
        u16 status;

        pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_STATUS, &status);
        if ((status != 0xffff) && (status & PCI_EXP_DPC_STATUS_TRIGGER))
                return false;

        if (test_bit(PCI_DPC_RECOVERING, &pdev->priv_flags))
                return false;

        return true;
}

/**
 * pci_dpc_recovered - whether DPC triggered and has recovered successfully
 * @pdev: PCI device
 *
 * Return true if DPC was triggered for @pdev and has recovered successfully.
 * Wait for recovery if it hasn't completed yet. Called from the PCIe hotplug
 * driver to recognize and ignore Link Down/Up events caused by DPC.
 */
bool pci_dpc_recovered(struct pci_dev *pdev)
{
        struct pci_host_bridge *host;

        if (!pdev->dpc_cap)
                return false;

        /*
         * Synchronization between hotplug and DPC is not supported
         * if DPC is owned by firmware and EDR is not enabled.
         */
        host = pci_find_host_bridge(pdev->bus);
        if (!host->native_dpc && !IS_ENABLED(CONFIG_PCIE_EDR))
                return false;

        /*
         * Need a timeout in case DPC never completes due to failure of
         * dpc_wait_rp_inactive(). The spec doesn't mandate a time limit,
         * but reports indicate that DPC completes within 4 seconds.
         */
        wait_event_timeout(dpc_completed_waitqueue, dpc_completed(pdev),
                           msecs_to_jiffies(4000));

        return test_and_clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
}
#endif /* CONFIG_HOTPLUG_PCI_PCIE */
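
/*
 * Poll the DPC RP Busy bit every 10 ms for up to one second; return -EBUSY
 * if the Root Port is still busy when the timeout expires.
 */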
static int dpc_wait_rp_inactive(struct pci_dev *pdev)
{
        unsigned long timeout = jiffies + HZ;
        u16 cap = pdev->dpc_cap, status;

        pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
        while (status & PCI_EXP_DPC_RP_BUSY &&
               !time_after(jiffies, timeout)) {
                msleep(10);
                pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
        }
        if (status & PCI_EXP_DPC_RP_BUSY) {
                pci_warn(pdev, "root port still busy\n");
                return -EBUSY;
        }
        return 0;
}
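
/*
 * Recover the Link after containment: wait for the Link to go down, clear
 * DPC Trigger Status so the Port can exit DPC, then wait for the Link to
 * come back up.
 */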
pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
        pci_ers_result_t ret;
        u16 cap;

        set_bit(PCI_DPC_RECOVERING, &pdev->priv_flags);

        /*
         * DPC disables the Link automatically in hardware, so it has
         * already been reset by the time we get here.
         */
        cap = pdev->dpc_cap;

        /*
         * Wait until the Link is inactive, then clear DPC Trigger Status
         * to allow the Port to leave DPC.
         */
        if (!pcie_wait_for_link(pdev, false))
                pci_info(pdev, "Data Link Layer Link Active not cleared in 1000 msec\n");

        if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev)) {
                clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
                ret = PCI_ERS_RESULT_DISCONNECT;
                goto out;
        }

        pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
                              PCI_EXP_DPC_STATUS_TRIGGER);

        if (!pcie_wait_for_link(pdev, true)) {
                pci_info(pdev, "Data Link Layer Link Active not set in 1000 msec\n");
                clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
                ret = PCI_ERS_RESULT_DISCONNECT;
        } else {
                set_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
                ret = PCI_ERS_RESULT_RECOVERED;
        }
out:
        clear_bit(PCI_DPC_RECOVERING, &pdev->priv_flags);
        wake_up_all(&dpc_completed_waitqueue);
        return ret;
}
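
/*
 * Dump the RP PIO error registers: status, mask, severity, syserror and
 * exception, the decoded error strings, and, when the RP PIO log is large
 * enough, the logged TLP Header, ImpSpec Log, and TLP Prefix Log entries.
 */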
static void dpc_process_rp_pio_error(struct pci_dev *pdev)
{
        u16 cap = pdev->dpc_cap, dpc_status, first_error;
        u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix;
        int i;

        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status);
        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_MASK, &mask);
        pci_err(pdev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n",
                status, mask);

        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SEVERITY, &sev);
        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SYSERROR, &syserr);
        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_EXCEPTION, &exc);
        pci_err(pdev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n",
                sev, syserr, exc);

        /* Get First Error Pointer */
        pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &dpc_status);
        first_error = (dpc_status & 0x1f00) >> 8;

        for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) {
                if ((status & ~mask) & (1 << i))
                        pci_err(pdev, "[%2d] %s%s\n", i, rp_pio_error_string[i],
                                first_error == i ? " (First)" : "");
        }

        if (pdev->dpc_rp_log_size < 4)
                goto clear_status;
        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG,
                              &dw0);
        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 4,
                              &dw1);
        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 8,
                              &dw2);
        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12,
                              &dw3);
        pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n",
                dw0, dw1, dw2, dw3);

        if (pdev->dpc_rp_log_size < 5)
                goto clear_status;
        pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log);
        pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log);

        /* Each TLP Prefix Log entry is one dword, so advance by 4 per pass */
        for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
                pci_read_config_dword(pdev,
                        cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4,
                        &prefix);
                pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
        }
 clear_status:
        pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status);
}
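
/*
 * Return 0 if no unmasked uncorrectable error is pending; otherwise set
 * info->severity to AER_FATAL or AER_NONFATAL based on the Uncorrectable
 * Error Severity register and return 1.
 */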
static int dpc_get_aer_uncorrect_severity(struct pci_dev *dev,
                                          struct aer_err_info *info)
{
        int pos = dev->aer_cap;
        u32 status, mask, sev;

        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
        status &= ~mask;
        if (!status)
                return 0;

        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
        status &= sev;
        if (status)
                info->severity = AER_FATAL;
        else
                info->severity = AER_NONFATAL;

        return 1;
}
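
/*
 * Decode the DPC Trigger Reason and log it. RP PIO errors get the detailed
 * register dump; unmasked uncorrectable errors are reported and cleared via
 * the AER helpers.
 */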
void dpc_process_error(struct pci_dev *pdev)
{
        u16 cap = pdev->dpc_cap, status, source, reason, ext_reason;
        struct aer_err_info info;

        pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
        pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source);
        pci_info(pdev, "containment event, status:%#06x source:%#06x\n",
                 status, source);

        reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1;
        ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5;
        pci_warn(pdev, "%s detected\n",
                 (reason == 0) ? "unmasked uncorrectable error" :
                 (reason == 1) ? "ERR_NONFATAL" :
                 (reason == 2) ? "ERR_FATAL" :
                 (ext_reason == 0) ? "RP PIO error" :
                 (ext_reason == 1) ? "software trigger" :
                                     "reserved error");

        /* show RP PIO error detail information */
        if (pdev->dpc_rp_extensions && reason == 3 && ext_reason == 0)
                dpc_process_rp_pio_error(pdev);
        else if (reason == 0 &&
                 dpc_get_aer_uncorrect_severity(pdev, &info) &&
                 aer_get_device_error_info(pdev, &info)) {
                aer_print_error(pdev, &info);
                pci_aer_clear_nonfatal_status(pdev);
                pci_aer_clear_fatal_status(pdev);
        }
}
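
/* Threaded interrupt handler: report the error, then attempt recovery */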
static irqreturn_t dpc_handler(int irq, void *context)
{
        struct pci_dev *pdev = context;

        dpc_process_error(pdev);

        /* We configure DPC so it only triggers on ERR_FATAL */
        pcie_do_recovery(pdev, pci_channel_io_frozen, dpc_reset_link);

        return IRQ_HANDLED;
}
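
/*
 * Hard interrupt handler: acknowledge the DPC interrupt and wake the
 * threaded handler if containment was actually triggered.
 */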
static irqreturn_t dpc_irq(int irq, void *context)
{
        struct pci_dev *pdev = context;
        u16 cap = pdev->dpc_cap, status;

        pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);

        if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT) || status == (u16)(~0))
                return IRQ_NONE;

        pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
                              PCI_EXP_DPC_STATUS_INTERRUPT);
        if (status & PCI_EXP_DPC_STATUS_TRIGGER)
                return IRQ_WAKE_THREAD;
        return IRQ_HANDLED;
}
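
/*
 * Cache the DPC Extended Capability offset and, when RP Extensions are
 * supported, the RP PIO log size advertised in the DPC Capability register.
 */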
void pci_dpc_init(struct pci_dev *pdev)
{
        u16 cap;

        pdev->dpc_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC);
        if (!pdev->dpc_cap)
                return;

        pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
        if (!(cap & PCI_EXP_DPC_CAP_RP_EXT))
                return;

        pdev->dpc_rp_extensions = true;

        pdev->dpc_rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
        if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
                pci_err(pdev, "RP PIO log size %u is invalid\n",
                        pdev->dpc_rp_log_size);
                pdev->dpc_rp_log_size = 0;
        }
}

#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
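
/*
 * Request the DPC interrupt, enable DPC triggering on ERR_FATAL along with
 * DPC interrupt generation, and allocate save-state space for the DPC
 * Control register.
 */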
static int dpc_probe(struct pcie_device *dev)
{
        struct pci_dev *pdev = dev->port;
        struct device *device = &dev->device;
        int status;
        u16 ctl, cap;

        if (!pcie_aer_is_native(pdev) && !pcie_ports_dpc_native)
                return -ENOTSUPP;

        status = devm_request_threaded_irq(device, dev->irq, dpc_irq,
                                           dpc_handler, IRQF_SHARED,
                                           "pcie-dpc", pdev);
        if (status) {
                pci_warn(pdev, "request IRQ%d failed: %d\n", dev->irq,
                         status);
                return status;
        }

        pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CAP, &cap);
        pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);

        ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN;
        pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
        pci_info(pdev, "enabled with IRQ %d\n", dev->irq);

        pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n",
                 cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT),
                 FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
                 FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), pdev->dpc_rp_log_size,
                 FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));

        pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16));
        return status;
}
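
/* Disable DPC triggering on ERR_FATAL and DPC interrupt generation */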
static void dpc_remove(struct pcie_device *dev)
{
        struct pci_dev *pdev = dev->port;
        u16 ctl;

        pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, &ctl);
        ctl &= ~(PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN);
        pci_write_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_CTL, ctl);
}

static struct pcie_port_service_driver dpcdriver = {
        .name           = "dpc",
        .port_type      = PCIE_ANY_PORT,
        .service        = PCIE_PORT_SERVICE_DPC,
        .probe          = dpc_probe,
        .remove         = dpc_remove,
};

int __init pcie_dpc_init(void)
{
        return pcie_port_service_register(&dpcdriver);
}