// SPDX-License-Identifier: GPL-2.0
/*
 * PCI support in ACPI
 *
 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
 * Copyright (C) 2004 Intel Corp.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include "pci.h"

/*
 * The GUID is defined in the PCI Firmware Specification available here:
 * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
 */
const guid_t pci_acpi_dsm_guid =
	GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
		  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);
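
/*
 * Example (illustrative sketch, not part of this file): evaluating a PCI
 * Firmware Spec _DSM function against this GUID.  acpi_pci_add_bus() below
 * does exactly this for function 8 (DSM_PCI_POWER_ON_RESET_DELAY); the
 * handle and revision used here follow that call site:
 *
 *	union acpi_object *obj;
 *
 *	obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
 *				DSM_PCI_POWER_ON_RESET_DELAY, NULL);
 *	if (obj) {
 *		if (obj->type == ACPI_TYPE_INTEGER)
 *			use(obj->integer.value);
 *		ACPI_FREE(obj);
 *	}
 */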

#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
{
	struct device *dev = &adev->dev;
	struct resource_entry *entry;
	struct list_head list;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&list);
	flags = IORESOURCE_MEM;
	ret = acpi_dev_get_resources(adev, &list,
				     acpi_dev_filter_resource_type_cb,
				     (void *) flags);
	if (ret < 0) {
		dev_err(dev, "failed to parse _CRS method, error code %d\n",
			ret);
		return ret;
	}

	if (ret == 0) {
		dev_err(dev, "no IO and memory resources present in _CRS\n");
		return -EINVAL;
	}

	entry = list_first_entry(&list, struct resource_entry, node);
	*res = *entry->res;
	acpi_dev_free_resource_list(&list);
	return 0;
}

static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
				 void **retval)
{
	u16 *segment = context;
	unsigned long long uid;
	acpi_status status;

	status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
	if (ACPI_FAILURE(status) || uid != *segment)
		return AE_CTRL_DEPTH;

	*(acpi_handle *)retval = handle;
	return AE_CTRL_TERMINATE;
}

int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
			  struct resource *res)
{
	struct acpi_device *adev;
	acpi_status status;
	acpi_handle handle;
	int ret;

	status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "can't find _HID %s device to locate resources\n",
			hid);
		return -ENODEV;
	}

	ret = acpi_bus_get_device(handle, &adev);
	if (ret)
		return ret;

	ret = acpi_get_rc_addr(adev, res);
	if (ret) {
		dev_err(dev, "can't get resource from %s\n",
			dev_name(&adev->dev));
		return ret;
	}

	return 0;
}
#endif
phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
	acpi_status status = AE_NOT_EXIST;
	unsigned long long mcfg_addr;

	if (handle)
		status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
					       NULL, &mcfg_addr);
	if (ACPI_FAILURE(status))
		return 0;

	return (phys_addr_t)mcfg_addr;
}

/* _HPX PCI Setting Record (Type 0); same as _HPP */
struct hpx_type0 {
	u32 revision;		/* Not present in _HPP */
	u8  cache_line_size;	/* Not applicable to PCIe */
	u8  latency_timer;	/* Not applicable to PCIe */
	u8  enable_serr;
	u8  enable_perr;
};

static struct hpx_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};

static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx)
{
	u16 pci_cmd, pci_bctl;

	if (!hpx)
		hpx = &pci_default_type0;

	if (hpx->revision > 1) {
		pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
			 hpx->revision);
		hpx = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer);
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpx->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpx->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpx->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpx->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}

static acpi_status decode_type0_hpx_record(union acpi_object *record,
					   struct hpx_type0 *hpx0)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 6)
			return AE_ERROR;
		for (i = 2; i < 6; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx0->revision = revision;
		hpx0->cache_line_size = fields[2].integer.value;
		hpx0->latency_timer = fields[3].integer.value;
		hpx0->enable_serr = fields[4].integer.value;
		hpx0->enable_perr = fields[5].integer.value;
		break;
	default:
		pr_warn("%s: Type 0 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

/* _HPX PCI-X Setting Record (Type 1) */
struct hpx_type1 {
	u32 revision;
	u8  max_mem_read;
	u8  avg_max_split;
	u16 tot_max_split;
};

static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx)
{
	int pos;

	if (!hpx)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return;

	pci_warn(dev, "PCI-X settings not supported\n");
}

static acpi_status decode_type1_hpx_record(union acpi_object *record,
					   struct hpx_type1 *hpx1)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 5)
			return AE_ERROR;
		for (i = 2; i < 5; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx1->revision = revision;
		hpx1->max_mem_read = fields[2].integer.value;
		hpx1->avg_max_split = fields[3].integer.value;
		hpx1->tot_max_split = fields[4].integer.value;
		break;
	default:
		pr_warn("%s: Type 1 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

static bool pcie_root_rcb_set(struct pci_dev *dev)
{
	struct pci_dev *rp = pcie_find_root_port(dev);
	u16 lnkctl;

	if (!rp)
		return false;

	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
	if (lnkctl & PCI_EXP_LNKCTL_RCB)
		return true;

	return false;
}

/* _HPX PCI Express Setting Record (Type 2) */
struct hpx_type2 {
	u32 revision;
	u32 unc_err_mask_and;
	u32 unc_err_mask_or;
	u32 unc_err_sever_and;
	u32 unc_err_sever_or;
	u32 cor_err_mask_and;
	u32 cor_err_mask_or;
	u32 adv_err_cap_and;
	u32 adv_err_cap_or;
	u16 pci_exp_devctl_and;
	u16 pci_exp_devctl_or;
	u16 pci_exp_lnkctl_and;
	u16 pci_exp_lnkctl_or;
	u32 sec_unc_err_sever_and;
	u32 sec_unc_err_sever_or;
	u32 sec_unc_err_mask_and;
	u32 sec_unc_err_mask_or;
};

static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx)
{
	int pos;
	u32 reg32;

	if (!hpx)
		return;

	if (!pci_is_pcie(dev))
		return;

	if (hpx->revision > 1) {
		pci_warn(dev, "PCIe settings rev %d not supported\n",
			 hpx->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				   PCI_EXP_DEVCTL_READRQ;
	hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {
		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or;

	/* Don't enable ECRC generation or checking if unsupported */
	if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
		reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
	if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
		reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}

static acpi_status decode_type2_hpx_record(union acpi_object *record,
					   struct hpx_type2 *hpx2)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 18)
			return AE_ERROR;
		for (i = 2; i < 18; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx2->revision = revision;
		hpx2->unc_err_mask_and = fields[2].integer.value;
		hpx2->unc_err_mask_or = fields[3].integer.value;
		hpx2->unc_err_sever_and = fields[4].integer.value;
		hpx2->unc_err_sever_or = fields[5].integer.value;
		hpx2->cor_err_mask_and = fields[6].integer.value;
		hpx2->cor_err_mask_or = fields[7].integer.value;
		hpx2->adv_err_cap_and = fields[8].integer.value;
		hpx2->adv_err_cap_or = fields[9].integer.value;
		hpx2->pci_exp_devctl_and = fields[10].integer.value;
		hpx2->pci_exp_devctl_or = fields[11].integer.value;
		hpx2->pci_exp_lnkctl_and = fields[12].integer.value;
		hpx2->pci_exp_lnkctl_or = fields[13].integer.value;
		hpx2->sec_unc_err_sever_and = fields[14].integer.value;
		hpx2->sec_unc_err_sever_or = fields[15].integer.value;
		hpx2->sec_unc_err_mask_and = fields[16].integer.value;
		hpx2->sec_unc_err_mask_or = fields[17].integer.value;
		break;
	default:
		pr_warn("%s: Type 2 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

/* _HPX PCI Express Setting Record (Type 3) */
struct hpx_type3 {
	u16 device_type;
	u16 function_type;
	u16 config_space_location;
	u16 pci_exp_cap_id;
	u16 pci_exp_cap_ver;
	u16 pci_exp_vendor_id;
	u16 dvsec_id;
	u16 dvsec_rev;
	u16 match_offset;
	u32 match_mask_and;
	u32 match_value;
	u16 reg_offset;
	u32 reg_mask_and;
	u32 reg_mask_or;
};

enum hpx_type3_dev_type {
	HPX_TYPE_ENDPOINT	= BIT(0),
	HPX_TYPE_LEG_END	= BIT(1),
	HPX_TYPE_RC_END		= BIT(2),
	HPX_TYPE_RC_EC		= BIT(3),
	HPX_TYPE_ROOT_PORT	= BIT(4),
	HPX_TYPE_UPSTREAM	= BIT(5),
	HPX_TYPE_DOWNSTREAM	= BIT(6),
	HPX_TYPE_PCI_BRIDGE	= BIT(7),
	HPX_TYPE_PCIE_BRIDGE	= BIT(8),
};

static u16 hpx3_device_type(struct pci_dev *dev)
{
	u16 pcie_type = pci_pcie_type(dev);
	static const int pcie_to_hpx3_type[] = {
		[PCI_EXP_TYPE_ENDPOINT]    = HPX_TYPE_ENDPOINT,
		[PCI_EXP_TYPE_LEG_END]     = HPX_TYPE_LEG_END,
		[PCI_EXP_TYPE_RC_END]      = HPX_TYPE_RC_END,
		[PCI_EXP_TYPE_RC_EC]       = HPX_TYPE_RC_EC,
		[PCI_EXP_TYPE_ROOT_PORT]   = HPX_TYPE_ROOT_PORT,
		[PCI_EXP_TYPE_UPSTREAM]    = HPX_TYPE_UPSTREAM,
		[PCI_EXP_TYPE_DOWNSTREAM]  = HPX_TYPE_DOWNSTREAM,
		[PCI_EXP_TYPE_PCI_BRIDGE]  = HPX_TYPE_PCI_BRIDGE,
		[PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
	};

	if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
		return 0;

	return pcie_to_hpx3_type[pcie_type];
}

enum hpx_type3_fn_type {
	HPX_FN_NORMAL		= BIT(0),
	HPX_FN_SRIOV_PHYS	= BIT(1),
	HPX_FN_SRIOV_VIRT	= BIT(2),
};

static u8 hpx3_function_type(struct pci_dev *dev)
{
	if (dev->is_virtfn)
		return HPX_FN_SRIOV_VIRT;
	else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
		return HPX_FN_SRIOV_PHYS;
	else
		return HPX_FN_NORMAL;
}

static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
{
	u8 cap_ver = hpx3_cap_id & 0xf;

	if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
		return true;
	else if (cap_ver == pcie_cap_id)
		return true;

	return false;
}

enum hpx_type3_cfg_loc {
	HPX_CFG_PCICFG		= 0,
	HPX_CFG_PCIE_CAP	= 1,
	HPX_CFG_PCIE_CAP_EXT	= 2,
	HPX_CFG_VEND_CAP	= 3,
	HPX_CFG_DVSEC		= 4,
	HPX_CFG_MAX,
};

static void program_hpx_type3_register(struct pci_dev *dev,
				       const struct hpx_type3 *reg)
{
	u32 match_reg, write_reg, header, orig_value;
	u16 pos;

	if (!(hpx3_device_type(dev) & reg->device_type))
		return;

	if (!(hpx3_function_type(dev) & reg->function_type))
		return;

	switch (reg->config_space_location) {
	case HPX_CFG_PCICFG:
		pos = 0;
		break;
	case HPX_CFG_PCIE_CAP:
		pos = pci_find_capability(dev, reg->pci_exp_cap_id);
		if (pos == 0)
			return;
		break;
	case HPX_CFG_PCIE_CAP_EXT:
		pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
		if (pos == 0)
			return;

		pci_read_config_dword(dev, pos, &header);
		if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
					  reg->pci_exp_cap_ver))
			return;
		break;
	case HPX_CFG_VEND_CAP:
	case HPX_CFG_DVSEC:
	default:
		pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location");
		return;
	}

	pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);
	if ((match_reg & reg->match_mask_and) != reg->match_value)
		return;

	pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
	orig_value = write_reg;
	write_reg &= reg->reg_mask_and;
	write_reg |= reg->reg_mask_or;

	if (orig_value == write_reg)
		return;

	pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);

	pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x",
		pos, orig_value, write_reg);
}

static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx)
{
	if (!hpx)
		return;

	if (!pci_is_pcie(dev))
		return;

	program_hpx_type3_register(dev, hpx);
}

static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
				union acpi_object *reg_fields)
{
	hpx3_reg->device_type = reg_fields[0].integer.value;
	hpx3_reg->function_type = reg_fields[1].integer.value;
	hpx3_reg->config_space_location = reg_fields[2].integer.value;
	hpx3_reg->pci_exp_cap_id = reg_fields[3].integer.value;
	hpx3_reg->pci_exp_cap_ver = reg_fields[4].integer.value;
	hpx3_reg->pci_exp_vendor_id = reg_fields[5].integer.value;
	hpx3_reg->dvsec_id = reg_fields[6].integer.value;
	hpx3_reg->dvsec_rev = reg_fields[7].integer.value;
	hpx3_reg->match_offset = reg_fields[8].integer.value;
	hpx3_reg->match_mask_and = reg_fields[9].integer.value;
	hpx3_reg->match_value = reg_fields[10].integer.value;
	hpx3_reg->reg_offset = reg_fields[11].integer.value;
	hpx3_reg->reg_mask_and = reg_fields[12].integer.value;
	hpx3_reg->reg_mask_or = reg_fields[13].integer.value;
}

static acpi_status program_type3_hpx_record(struct pci_dev *dev,
					    union acpi_object *record)
{
	union acpi_object *fields = record->package.elements;
	u32 desc_count, expected_length, revision;
	union acpi_object *reg_fields;
	struct hpx_type3 hpx3;
	int i;

	revision = fields[1].integer.value;
	switch (revision) {
	case 1:
		desc_count = fields[2].integer.value;
		expected_length = 3 + desc_count * 14;
		if (record->package.count != expected_length)
			return AE_ERROR;

		for (i = 2; i < expected_length; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;

		for (i = 0; i < desc_count; i++) {
			reg_fields = fields + 3 + i * 14;
			parse_hpx3_register(&hpx3, reg_fields);
			program_hpx_type3(dev, &hpx3);
		}
		break;
	default:
		printk(KERN_WARNING
			"%s: Type 3 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle)
{
	acpi_status status;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package, *record, *fields;
	struct hpx_type0 hpx0;
	struct hpx_type1 hpx1;
	struct hpx_type2 hpx2;
	u32 type;
	int i;

	status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *)buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE) {
		status = AE_ERROR;
		goto exit;
	}

	for (i = 0; i < package->package.count; i++) {
		record = &package->package.elements[i];
		if (record->type != ACPI_TYPE_PACKAGE) {
			status = AE_ERROR;
			goto exit;
		}

		fields = record->package.elements;
		if (fields[0].type != ACPI_TYPE_INTEGER ||
		    fields[1].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}

		type = fields[0].integer.value;
		switch (type) {
		case 0:
			memset(&hpx0, 0, sizeof(hpx0));
			status = decode_type0_hpx_record(record, &hpx0);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type0(dev, &hpx0);
			break;
		case 1:
			memset(&hpx1, 0, sizeof(hpx1));
			status = decode_type1_hpx_record(record, &hpx1);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type1(dev, &hpx1);
			break;
		case 2:
			memset(&hpx2, 0, sizeof(hpx2));
			status = decode_type2_hpx_record(record, &hpx2);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type2(dev, &hpx2);
			break;
		case 3:
			status = program_type3_hpx_record(dev, record);
			if (ACPI_FAILURE(status))
				goto exit;
			break;
		default:
			pr_err("%s: Type %d record not supported\n",
			       __func__, type);
			status = AE_ERROR;
			goto exit;
		}
	}
exit:
	kfree(buffer.pointer);
	return status;
}

static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle)
{
	acpi_status status;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *package, *fields;
	struct hpx_type0 hpx0;
	int i;

	memset(&hpx0, 0, sizeof(hpx0));

	status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *) buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE ||
	    package->package.count != 4) {
		status = AE_ERROR;
		goto exit;
	}

	fields = package->package.elements;
	for (i = 0; i < 4; i++) {
		if (fields[i].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}
	}

	hpx0.revision = 1;
	hpx0.cache_line_size = fields[0].integer.value;
	hpx0.latency_timer = fields[1].integer.value;
	hpx0.enable_serr = fields[2].integer.value;
	hpx0.enable_perr = fields[3].integer.value;

	program_hpx_type0(dev, &hpx0);

exit:
	kfree(buffer.pointer);
	return status;
}

/**
 * pci_acpi_program_hp_params - program hotplug parameters from _HPX/_HPP
 * @dev: the pci_dev for which we want parameters
 */
int pci_acpi_program_hp_params(struct pci_dev *dev)
{
	acpi_status status;
	acpi_handle handle, phandle;
	struct pci_bus *pbus;

	if (acpi_pci_disabled)
		return -ENODEV;

	handle = NULL;
	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
		handle = acpi_pci_get_bridge_handle(pbus);
		if (handle)
			break;
	}

	/*
	 * _HPP settings apply to all child buses, until another _HPP is
	 * encountered.  If we don't find an _HPP for the input pci dev,
	 * look for it in the parent device scope since that would apply to
	 * this pci dev.
	 */
	while (handle) {
		status = acpi_run_hpx(dev, handle);
		if (ACPI_SUCCESS(status))
			return 0;
		status = acpi_run_hpp(dev, handle);
		if (ACPI_SUCCESS(status))
			return 0;
		if (acpi_is_root_bridge(handle))
			break;
		status = acpi_get_parent(handle, &phandle);
		if (ACPI_FAILURE(status))
			break;
		handle = phandle;
	}
	return -ENODEV;
}
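
/*
 * Usage note (illustrative, not part of this file): in mainline the PCI
 * core calls pci_acpi_program_hp_params() while configuring each newly
 * enumerated device, roughly like the sketch below (see
 * pci_configure_device() in drivers/pci/probe.c):
 *
 *	static void pci_configure_device(struct pci_dev *dev)
 *	{
 *		...
 *		pci_acpi_program_hp_params(dev);
 *	}
 */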

/**
 * pciehp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native PCIe hotplug
 * driver.
 */
bool pciehp_is_native(struct pci_dev *bridge)
{
	const struct pci_host_bridge *host;
	u32 slot_cap;

	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
		return false;

	pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
	if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
		return false;

	if (pcie_ports_native)
		return true;

	host = pci_find_host_bridge(bridge->bus);
	return host->native_pcie_hotplug;
}

/**
 * shpchp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native SHPC hotplug
 * driver.
 */
bool shpchp_is_native(struct pci_dev *bridge)
{
	return bridge->shpc_managed;
}

/**
 * pci_acpi_wake_bus - Root bus wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
{
	struct acpi_device *adev;
	struct acpi_pci_root *root;

	adev = container_of(context, struct acpi_device, wakeup.context);
	root = acpi_driver_data(adev);
	pci_pme_wakeup_bus(root->bus);
}

/**
 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(context->dev);

	if (pci_dev->pme_poll)
		pci_dev->pme_poll = false;

	if (pci_dev->current_state == PCI_D3cold) {
		pci_wakeup_event(pci_dev);
		pm_request_resume(&pci_dev->dev);
		return;
	}

	/* Clear PME Status if set. */
	if (pci_dev->pme_support)
		pci_check_pme_status(pci_dev);

	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);

	pci_pme_wakeup_bus(pci_dev->subordinate);
}

/**
 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
 * @dev: PCI root bridge ACPI device.
 */
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
	return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}

/**
 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 * @dev: ACPI device to add the notifier for.
 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 */
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
				     struct pci_dev *pci_dev)
{
	return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}

/*
 * _SxD returns the D-state with the highest power
 * (lowest D-state number) supported in the S-state "x".
 *
 * If the device does not have a _PRW
 * (Power Resources for Wake) supporting system wakeup from "x",
 * then the OS is free to choose a lower power (higher number
 * D-state) than the return value from _SxD.
 *
 * But if _PRW is enabled at S-state "x", the OS
 * must not choose a power lower than _SxD --
 * unless the device has an _SxW method specifying
 * the lowest power (highest D-state number) the device
 * may enter while still able to wake the system.
 *
 * i.e. depending on global OS policy:
 *
 * if (_PRW at S-state x)
 *	choose from highest power _SxD to lowest power _SxW
 * else // no _PRW at S-state x
 *	choose highest power _SxD or any lower power
 */
static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
	int acpi_state, d_max;

	if (pdev->no_d3cold)
		d_max = ACPI_STATE_D3_HOT;
	else
		d_max = ACPI_STATE_D3_COLD;
	acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
	if (acpi_state < 0)
		return PCI_POWER_ERROR;

	switch (acpi_state) {
	case ACPI_STATE_D0:
		return PCI_D0;
	case ACPI_STATE_D1:
		return PCI_D1;
	case ACPI_STATE_D2:
		return PCI_D2;
	case ACPI_STATE_D3_HOT:
		return PCI_D3hot;
	case ACPI_STATE_D3_COLD:
		return PCI_D3cold;
	}
	return PCI_POWER_ERROR;
}

static struct acpi_device *acpi_pci_find_companion(struct device *dev);

static bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
	const struct fwnode_handle *fwnode;
	struct acpi_device *adev;
	struct pci_dev *root;
	u8 val;

	if (!dev->is_hotplug_bridge)
		return false;

	/* Assume D3 support if the bridge is power-manageable by ACPI. */
	adev = ACPI_COMPANION(&dev->dev);
	if (!adev && !pci_dev_is_added(dev)) {
		adev = acpi_pci_find_companion(&dev->dev);
		ACPI_COMPANION_SET(&dev->dev, adev);
	}

	if (adev && acpi_device_power_manageable(adev))
		return true;

	/*
	 * Look for a special _DSD property for the root port and if it
	 * is set we know the hierarchy behind it supports D3 just fine.
	 */
	root = pcie_find_root_port(dev);
	if (!root)
		return false;

	adev = ACPI_COMPANION(&root->dev);
	if (root == dev) {
		/*
		 * It is possible that the ACPI companion is not yet bound
		 * for the root port so look it up manually here.
		 */
		if (!adev && !pci_dev_is_added(root))
			adev = acpi_pci_find_companion(&root->dev);
	}

	if (!adev)
		return false;

	fwnode = acpi_fwnode_handle(adev);
	if (fwnode_property_read_u8(fwnode, "HotPlugSupportInD3", &val))
		return false;

	return val == 1;
}

static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	return adev ? acpi_device_power_manageable(adev) : false;
}

static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	static const u8 state_conv[] = {
		[PCI_D0]     = ACPI_STATE_D0,
		[PCI_D1]     = ACPI_STATE_D1,
		[PCI_D2]     = ACPI_STATE_D2,
		[PCI_D3hot]  = ACPI_STATE_D3_HOT,
		[PCI_D3cold] = ACPI_STATE_D3_COLD,
	};
	int error = -EINVAL;

	/* If the ACPI device has _EJ0, ignore the device */
	if (!adev || acpi_has_method(adev->handle, "_EJ0"))
		return -ENODEV;

	switch (state) {
	case PCI_D3cold:
		if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
				PM_QOS_FLAGS_ALL) {
			error = -EBUSY;
			break;
		}
		fallthrough;
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
		error = acpi_device_set_power(adev, state_conv[state]);
	}

	if (!error)
		pci_dbg(dev, "power state changed by ACPI to %s\n",
			acpi_power_state_string(state_conv[state]));

	return error;
}

static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	static const pci_power_t state_conv[] = {
		[ACPI_STATE_D0]      = PCI_D0,
		[ACPI_STATE_D1]      = PCI_D1,
		[ACPI_STATE_D2]      = PCI_D2,
		[ACPI_STATE_D3_HOT]  = PCI_D3hot,
		[ACPI_STATE_D3_COLD] = PCI_D3cold,
	};
	int state;

	if (!adev || !acpi_device_power_manageable(adev))
		return PCI_UNKNOWN;

	state = adev->power.state;
	if (state == ACPI_STATE_UNKNOWN)
		return PCI_UNKNOWN;

	return state_conv[state];
}

static void acpi_pci_refresh_power_state(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	if (adev && acpi_device_power_manageable(adev))
		acpi_device_update_power(adev, NULL);
}

static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
	while (bus->parent) {
		if (acpi_pm_device_can_wakeup(&bus->self->dev))
			return acpi_pm_set_device_wakeup(&bus->self->dev, enable);

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge) {
		if (acpi_pm_device_can_wakeup(bus->bridge))
			return acpi_pm_set_device_wakeup(bus->bridge, enable);
	}
	return 0;
}

static int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
{
	if (acpi_pm_device_can_wakeup(&dev->dev))
		return acpi_pm_set_device_wakeup(&dev->dev, enable);

	return acpi_pci_propagate_wakeup(dev->bus, enable);
}

static bool acpi_pci_need_resume(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	/*
	 * In some cases (e.g. Samsung 305V4A) leaving a bridge in suspend
	 * over system-wide suspend/resume confuses the platform firmware,
	 * so avoid doing that.  According to Section 16.1.6 of ACPI 6.2,
	 * endpoint devices are expected to be in D3 before invoking the S3
	 * entry path from the firmware, so they should not be affected by
	 * this issue.
	 */
	if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
		return true;

	if (!adev || !acpi_device_power_manageable(adev))
		return false;

	if (adev->wakeup.flags.valid &&
	    device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
		return true;

	if (acpi_target_system_state() == ACPI_STATE_S0)
		return false;

	return !!adev->power.flags.dsw_present;
}

static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
	.bridge_d3	= acpi_pci_bridge_d3,
	.is_manageable	= acpi_pci_power_manageable,
	.set_state	= acpi_pci_set_power_state,
	.get_state	= acpi_pci_get_power_state,
	.refresh_state	= acpi_pci_refresh_power_state,
	.choose_state	= acpi_pci_choose_state,
	.set_wakeup	= acpi_pci_wakeup,
	.need_resume	= acpi_pci_need_resume,
};
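
/*
 * Note: these callbacks are handed to the PCI core via pci_set_platform_pm()
 * in acpi_pci_init() below, which makes ACPI the platform power-management
 * backend for all PCI devices.
 */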

void acpi_pci_add_bus(struct pci_bus *bus)
{
	union acpi_object *obj;
	struct pci_host_bridge *bridge;

	if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
		return;

	acpi_pci_slot_enumerate(bus);
	acpiphp_enumerate_slots(bus);

	/*
	 * For a host bridge, check its _DSM for function 8 and if
	 * that is available, mark it in pci_host_bridge.
	 */
	if (!pci_is_root_bus(bus))
		return;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
				DSM_PCI_POWER_ON_RESET_DELAY, NULL);
	if (!obj)
		return;

	if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) {
		bridge = pci_find_host_bridge(bus);
		bridge->ignore_reset_delay = 1;
	}
	ACPI_FREE(obj);
}

void acpi_pci_remove_bus(struct pci_bus *bus)
{
	if (acpi_pci_disabled || !bus->bridge)
		return;

	acpiphp_remove_slots(bus);
	acpi_pci_slot_remove(bus);
}

/* ACPI bus type */
static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	bool check_children;
	u64 addr;

	check_children = pci_is_bridge(pci_dev);
	/* Refer to the ACPI spec for the syntax of _ADR. */
	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
	return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
				      check_children);
}

/**
 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
 * @pdev: the PCI device whose delay is to be updated
 * @handle: ACPI handle of this device
 *
 * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM
 * control method of either the device itself or the PCI host bridge.
 *
 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
 * host bridge.  If it returns one, the OS may assume that all devices in
 * the hierarchy have already completed power-on reset delays.
 *
 * Function 9, "Device Readiness Durations," applies only to the object
 * where it is located.  It returns delay durations required after various
 * events if the device requires less time than the spec requires.  Delays
 * from this function take precedence over the Reset Delay function.
 *
 * These _DSM functions are defined by the draft ECN of January 28, 2014,
 * titled "ACPI additions for FW latency optimizations."
 */
static void pci_acpi_optimize_delay(struct pci_dev *pdev,
				    acpi_handle handle)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
	int value;
	union acpi_object *obj, *elements;

	if (bridge->ignore_reset_delay)
		pdev->d3cold_delay = 0;

	obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
				DSM_PCI_DEVICE_READINESS_DURATIONS, NULL);
	if (!obj)
		return;

	if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) {
		elements = obj->package.elements;
		if (elements[0].type == ACPI_TYPE_INTEGER) {
			value = (int)elements[0].integer.value / 1000;
			if (value < PCI_PM_D3COLD_WAIT)
				pdev->d3cold_delay = value;
		}
		if (elements[3].type == ACPI_TYPE_INTEGER) {
			value = (int)elements[3].integer.value / 1000;
			if (value < PCI_PM_D3HOT_WAIT)
				pdev->d3hot_delay = value;
		}
	}
	ACPI_FREE(obj);
}

static void pci_acpi_set_external_facing(struct pci_dev *dev)
{
	u8 val;

	if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		return;

	if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
		return;

	/*
	 * These root ports expose PCIe (including DMA) outside of the
	 * system.  Everything downstream from them is external.
	 */
	if (val)
		dev->external_facing = 1;
}

static void pci_acpi_setup(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct acpi_device *adev = ACPI_COMPANION(dev);

	if (!adev)
		return;

	pci_acpi_optimize_delay(pci_dev, adev->handle);
	pci_acpi_set_external_facing(pci_dev);
	pci_acpi_add_edr_notifier(pci_dev);

	pci_acpi_add_pm_notifier(adev, pci_dev);
	if (!adev->wakeup.flags.valid)
		return;

	device_set_wakeup_capable(dev, true);
	/*
	 * For bridges that can do D3 we enable wake automatically (as
	 * we do for the power management itself in that case).  The
	 * reason is that the bridge may have additional methods such as
	 * _DSW that need to be called.
	 */
	if (pci_dev->bridge_d3)
		device_wakeup_enable(dev);

	acpi_pci_wakeup(pci_dev, false);
	acpi_device_power_add_dependent(adev, dev);
}

static void pci_acpi_cleanup(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (!adev)
		return;

	pci_acpi_remove_edr_notifier(pci_dev);
	pci_acpi_remove_pm_notifier(adev);
	if (adev->wakeup.flags.valid) {
		acpi_device_power_remove_dependent(adev, dev);
		if (pci_dev->bridge_d3)
			device_wakeup_disable(dev);

		device_set_wakeup_capable(dev, false);
	}
}

static bool pci_acpi_bus_match(struct device *dev)
{
	return dev_is_pci(dev);
}

static struct acpi_bus_type acpi_pci_bus = {
	.name		= "PCI",
	.match		= pci_acpi_bus_match,
	.find_companion	= acpi_pci_find_companion,
	.setup		= pci_acpi_setup,
	.cleanup	= pci_acpi_cleanup,
};

static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);

/**
 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
 * @fn: Callback matching a device to a fwnode that identifies a PCI
 *      MSI domain.
 *
 * This should be called by the irqchip driver that is the parent of the
 * MSI domain, to provide a callback interface for querying the fwnode.
 */
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
{
	pci_msi_get_fwnode_cb = fn;
}
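
/*
 * Example (illustrative sketch, not part of this file): an irqchip driver
 * that owns the PCI MSI domain registers its provider at init time.  The
 * names my_msi_fwnode and my_pci_msi_get_fwnode below are hypothetical,
 * used only to show the shape of the callback:
 *
 *	static struct fwnode_handle *my_msi_fwnode;
 *
 *	static struct fwnode_handle *my_pci_msi_get_fwnode(struct device *dev)
 *	{
 *		return my_msi_fwnode;
 *	}
 *
 *	pci_msi_register_fwnode_provider(&my_pci_msi_get_fwnode);
 */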

/**
 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
 * @bus: The PCI host bridge bus.
 *
 * This function uses the callback function registered by
 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
 * This returns NULL on error or when the domain is not found.
 */
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
{
	struct fwnode_handle *fwnode;

	if (!pci_msi_get_fwnode_cb)
		return NULL;

	fwnode = pci_msi_get_fwnode_cb(&bus->dev);
	if (!fwnode)
		return NULL;

	return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
}

static int __init acpi_pci_init(void)
{
	int ret;

	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
		pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
		pci_no_msi();
	}

	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
		pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
		pcie_no_aspm();
	}

	ret = register_acpi_bus_type(&acpi_pci_bus);
	if (ret)
		return 0;

	pci_set_platform_pm(&acpi_pci_platform_pm);
	acpi_pci_slot_init();
	acpiphp_init();

	return 0;
}
arch_initcall(acpi_pci_init);