pci-uclass.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2014 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <init.h>
#include <log.h>
#include <malloc.h>
#include <pci.h>
#include <asm/io.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
#include <asm/fsp/fsp_support.h>
#endif
#include <linux/delay.h>
#include "pci_internal.h"

DECLARE_GLOBAL_DATA_PTR;
int pci_get_bus(int busnum, struct udevice **busp)
{
	int ret;

	ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);

	/* Since buses may not be numbered yet, try a little harder with bus 0 */
	if (ret == -ENODEV) {
		ret = uclass_first_device_err(UCLASS_PCI, busp);
		if (ret)
			return ret;
		ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);
	}

	return ret;
}
struct udevice *pci_get_controller(struct udevice *dev)
{
	while (device_is_on_pci_bus(dev))
		dev = dev->parent;

	return dev;
}
pci_dev_t dm_pci_get_bdf(const struct udevice *dev)
{
	struct pci_child_plat *pplat = dev_get_parent_plat(dev);
	struct udevice *bus = dev->parent;

	/*
	 * This error indicates that @dev is a device on an unprobed PCI bus.
	 * The bus likely has bus->seq == -1, so the PCI_ADD_BUS() macro below
	 * will produce a bad BDF.
	 *
	 * A common cause of this problem is that this function is called in
	 * the of_to_plat() method of @dev. Accessing the PCI bus in that
	 * method is not allowed, since it has not yet been probed. To fix
	 * this, move that access to the probe() method of @dev instead.
	 */
	if (!device_active(bus))
		log_err("PCI: Device '%s' on unprobed bus '%s'\n", dev->name,
			bus->name);
	return PCI_ADD_BUS(dev_seq(bus), pplat->devfn);
}
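/*
 * Illustrative sketch (hypothetical driver, not part of this uclass): since
 * dm_pci_get_bdf() needs a probed parent bus, call it from probe() rather
 * than from of_to_plat():
 *
 *	static int my_pci_probe(struct udevice *dev)
 *	{
 *		pci_dev_t bdf = dm_pci_get_bdf(dev);
 *
 *		printf("%x:%x.%x\n", PCI_BUS(bdf), PCI_DEV(bdf),
 *		       PCI_FUNC(bdf));
 *
 *		return 0;
 *	}
 */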
/**
 * pci_get_bus_max() - returns the bus number of the last active bus
 *
 * @return last bus number, or -1 if no active buses
 */
static int pci_get_bus_max(void)
{
	struct udevice *bus;
	struct uclass *uc;
	int ret = -1;

	ret = uclass_get(UCLASS_PCI, &uc);
	uclass_foreach_dev(bus, uc) {
		if (dev_seq(bus) > ret)
			ret = dev_seq(bus);
	}

	debug("%s: ret=%d\n", __func__, ret);

	return ret;
}
int pci_last_busno(void)
{
	return pci_get_bus_max();
}
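/**
 * pci_get_ff() - Get the all-ones value for a config-space access size
 *
 * This is the value a config read yields for a non-existent device
 * (0xff, 0xffff or 0xffffffff for 8-, 16- and 32-bit reads respectively),
 * as used by pci_generic_mmap_read_config() below.
 */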
int pci_get_ff(enum pci_size_t size)
{
	switch (size) {
	case PCI_SIZE_8:
		return 0xff;
	case PCI_SIZE_16:
		return 0xffff;
	default:
		return 0xffffffff;
	}
}
static void pci_dev_find_ofnode(struct udevice *bus, phys_addr_t bdf,
				ofnode *rnode)
{
	struct fdt_pci_addr addr;
	ofnode node;
	int ret;

	dev_for_each_subnode(node, bus) {
		ret = ofnode_read_pci_addr(node, FDT_PCI_SPACE_CONFIG, "reg",
					   &addr);
		if (ret)
			continue;

		if (PCI_MASK_BUS(addr.phys_hi) != PCI_MASK_BUS(bdf))
			continue;

		*rnode = node;
		break;
	}
}
int pci_bus_find_devfn(const struct udevice *bus, pci_dev_t find_devfn,
		       struct udevice **devp)
{
	struct udevice *dev;

	for (device_find_first_child(bus, &dev);
	     dev;
	     device_find_next_child(&dev)) {
		struct pci_child_plat *pplat;

		pplat = dev_get_parent_plat(dev);
		if (pplat && pplat->devfn == find_devfn) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}
int dm_pci_bus_find_bdf(pci_dev_t bdf, struct udevice **devp)
{
	struct udevice *bus;
	int ret;

	ret = pci_get_bus(PCI_BUS(bdf), &bus);
	if (ret)
		return ret;

	return pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), devp);
}
static int pci_device_matches_ids(struct udevice *dev,
				  struct pci_device_id *ids)
{
	struct pci_child_plat *pplat;
	int i;

	pplat = dev_get_parent_plat(dev);
	if (!pplat)
		return -EINVAL;

	for (i = 0; ids[i].vendor != 0; i++) {
		if (pplat->vendor == ids[i].vendor &&
		    pplat->device == ids[i].device)
			return i;
	}

	return -EINVAL;
}

int pci_bus_find_devices(struct udevice *bus, struct pci_device_id *ids,
			 int *indexp, struct udevice **devp)
{
	struct udevice *dev;

	/* Scan all devices on this bus */
	for (device_find_first_child(bus, &dev);
	     dev;
	     device_find_next_child(&dev)) {
		if (pci_device_matches_ids(dev, ids) >= 0) {
			if ((*indexp)-- <= 0) {
				*devp = dev;
				return 0;
			}
		}
	}

	return -ENODEV;
}
int pci_find_device_id(struct pci_device_id *ids, int index,
		       struct udevice **devp)
{
	struct udevice *bus;

	/* Scan all known buses */
	for (uclass_first_device(UCLASS_PCI, &bus);
	     bus;
	     uclass_next_device(&bus)) {
		if (!pci_bus_find_devices(bus, ids, &index, devp))
			return 0;
	}
	*devp = NULL;

	return -ENODEV;
}
static int dm_pci_bus_find_device(struct udevice *bus, unsigned int vendor,
				  unsigned int device, int *indexp,
				  struct udevice **devp)
{
	struct pci_child_plat *pplat;
	struct udevice *dev;

	for (device_find_first_child(bus, &dev);
	     dev;
	     device_find_next_child(&dev)) {
		pplat = dev_get_parent_plat(dev);
		if (pplat->vendor == vendor && pplat->device == device) {
			if (!(*indexp)--) {
				*devp = dev;
				return 0;
			}
		}
	}

	return -ENODEV;
}

int dm_pci_find_device(unsigned int vendor, unsigned int device, int index,
		       struct udevice **devp)
{
	struct udevice *bus;

	/* Scan all known buses */
	for (uclass_first_device(UCLASS_PCI, &bus);
	     bus;
	     uclass_next_device(&bus)) {
		if (!dm_pci_bus_find_device(bus, vendor, device, &index, devp))
			return device_probe(*devp);
	}
	*devp = NULL;

	return -ENODEV;
}
int dm_pci_find_class(uint find_class, int index, struct udevice **devp)
{
	struct udevice *dev;

	/* Scan all known buses */
	for (pci_find_first_device(&dev);
	     dev;
	     pci_find_next_device(&dev)) {
		struct pci_child_plat *pplat = dev_get_parent_plat(dev);

		if (pplat->class == find_class && !index--) {
			*devp = dev;
			return device_probe(*devp);
		}
	}
	*devp = NULL;

	return -ENODEV;
}
int pci_bus_write_config(struct udevice *bus, pci_dev_t bdf, int offset,
			 unsigned long value, enum pci_size_t size)
{
	struct dm_pci_ops *ops;

	ops = pci_get_ops(bus);
	if (!ops->write_config)
		return -ENOSYS;

	return ops->write_config(bus, bdf, offset, value, size);
}

int pci_bus_clrset_config32(struct udevice *bus, pci_dev_t bdf, int offset,
			    u32 clr, u32 set)
{
	ulong val;
	int ret;

	ret = pci_bus_read_config(bus, bdf, offset, &val, PCI_SIZE_32);
	if (ret)
		return ret;
	val &= ~clr;
	val |= set;

	return pci_bus_write_config(bus, bdf, offset, val, PCI_SIZE_32);
}
int pci_write_config(pci_dev_t bdf, int offset, unsigned long value,
		     enum pci_size_t size)
{
	struct udevice *bus;
	int ret;

	ret = pci_get_bus(PCI_BUS(bdf), &bus);
	if (ret)
		return ret;

	return pci_bus_write_config(bus, bdf, offset, value, size);
}

int dm_pci_write_config(struct udevice *dev, int offset, unsigned long value,
			enum pci_size_t size)
{
	struct udevice *bus;

	for (bus = dev; device_is_on_pci_bus(bus);)
		bus = bus->parent;

	return pci_bus_write_config(bus, dm_pci_get_bdf(dev), offset, value,
				    size);
}

int pci_write_config32(pci_dev_t bdf, int offset, u32 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_32);
}

int pci_write_config16(pci_dev_t bdf, int offset, u16 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_16);
}

int pci_write_config8(pci_dev_t bdf, int offset, u8 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_8);
}

int dm_pci_write_config8(struct udevice *dev, int offset, u8 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_8);
}

int dm_pci_write_config16(struct udevice *dev, int offset, u16 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_16);
}

int dm_pci_write_config32(struct udevice *dev, int offset, u32 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_32);
}
int pci_bus_read_config(const struct udevice *bus, pci_dev_t bdf, int offset,
			unsigned long *valuep, enum pci_size_t size)
{
	struct dm_pci_ops *ops;

	ops = pci_get_ops(bus);
	if (!ops->read_config)
		return -ENOSYS;

	return ops->read_config(bus, bdf, offset, valuep, size);
}

int pci_read_config(pci_dev_t bdf, int offset, unsigned long *valuep,
		    enum pci_size_t size)
{
	struct udevice *bus;
	int ret;

	ret = pci_get_bus(PCI_BUS(bdf), &bus);
	if (ret)
		return ret;

	return pci_bus_read_config(bus, bdf, offset, valuep, size);
}

int dm_pci_read_config(const struct udevice *dev, int offset,
		       unsigned long *valuep, enum pci_size_t size)
{
	const struct udevice *bus;

	for (bus = dev; device_is_on_pci_bus(bus);)
		bus = bus->parent;

	return pci_bus_read_config(bus, dm_pci_get_bdf(dev), offset, valuep,
				   size);
}
int pci_read_config32(pci_dev_t bdf, int offset, u32 *valuep)
{
	unsigned long value;
	int ret;

	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_32);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int pci_read_config16(pci_dev_t bdf, int offset, u16 *valuep)
{
	unsigned long value;
	int ret;

	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_16);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int pci_read_config8(pci_dev_t bdf, int offset, u8 *valuep)
{
	unsigned long value;
	int ret;

	ret = pci_read_config(bdf, offset, &value, PCI_SIZE_8);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int dm_pci_read_config8(const struct udevice *dev, int offset, u8 *valuep)
{
	unsigned long value;
	int ret;

	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_8);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int dm_pci_read_config16(const struct udevice *dev, int offset, u16 *valuep)
{
	unsigned long value;
	int ret;

	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_16);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}

int dm_pci_read_config32(const struct udevice *dev, int offset, u32 *valuep)
{
	unsigned long value;
	int ret;

	ret = dm_pci_read_config(dev, offset, &value, PCI_SIZE_32);
	if (ret)
		return ret;
	*valuep = value;

	return 0;
}
int dm_pci_clrset_config8(struct udevice *dev, int offset, u32 clr, u32 set)
{
	u8 val;
	int ret;

	ret = dm_pci_read_config8(dev, offset, &val);
	if (ret)
		return ret;
	val &= ~clr;
	val |= set;

	return dm_pci_write_config8(dev, offset, val);
}

int dm_pci_clrset_config16(struct udevice *dev, int offset, u32 clr, u32 set)
{
	u16 val;
	int ret;

	ret = dm_pci_read_config16(dev, offset, &val);
	if (ret)
		return ret;
	val &= ~clr;
	val |= set;

	return dm_pci_write_config16(dev, offset, val);
}

int dm_pci_clrset_config32(struct udevice *dev, int offset, u32 clr, u32 set)
{
	u32 val;
	int ret;

	ret = dm_pci_read_config32(dev, offset, &val);
	if (ret)
		return ret;
	val &= ~clr;
	val |= set;

	return dm_pci_write_config32(dev, offset, val);
}
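/*
 * Usage sketch: the dm_pci_clrset_config*() helpers above implement a
 * config-space read-modify-write. For example, a driver could enable
 * memory decoding and bus mastering with:
 *
 *	dm_pci_clrset_config16(dev, PCI_COMMAND, 0,
 *			       PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
 */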
static void set_vga_bridge_bits(struct udevice *dev)
{
	struct udevice *parent = dev->parent;
	u16 bc;

	while (dev_seq(parent) != 0) {
		dm_pci_read_config16(parent, PCI_BRIDGE_CONTROL, &bc);
		bc |= PCI_BRIDGE_CTL_VGA;
		dm_pci_write_config16(parent, PCI_BRIDGE_CONTROL, bc);
		parent = parent->parent;
	}
}
int pci_auto_config_devices(struct udevice *bus)
{
	struct pci_controller *hose = bus->uclass_priv;
	struct pci_child_plat *pplat;
	unsigned int sub_bus;
	struct udevice *dev;
	int ret;

	sub_bus = dev_seq(bus);
	debug("%s: start\n", __func__);
	pciauto_config_init(hose);
	for (ret = device_find_first_child(bus, &dev);
	     !ret && dev;
	     ret = device_find_next_child(&dev)) {
		unsigned int max_bus;
		int ret;

		debug("%s: device %s\n", __func__, dev->name);
		if (dev_of_valid(dev) &&
		    dev_read_bool(dev, "pci,no-autoconfig"))
			continue;
		ret = dm_pciauto_config_device(dev);
		if (ret < 0)
			return ret;
		max_bus = ret;
		sub_bus = max(sub_bus, max_bus);

		pplat = dev_get_parent_plat(dev);
		if (pplat->class == (PCI_CLASS_DISPLAY_VGA << 8))
			set_vga_bridge_bits(dev);
	}
	debug("%s: done\n", __func__);

	return sub_bus;
}
int pci_generic_mmap_write_config(
	const struct udevice *bus,
	int (*addr_f)(const struct udevice *bus, pci_dev_t bdf, uint offset,
		      void **addrp),
	pci_dev_t bdf,
	uint offset,
	ulong value,
	enum pci_size_t size)
{
	void *address;

	if (addr_f(bus, bdf, offset, &address) < 0)
		return 0;

	switch (size) {
	case PCI_SIZE_8:
		writeb(value, address);
		return 0;
	case PCI_SIZE_16:
		writew(value, address);
		return 0;
	case PCI_SIZE_32:
		writel(value, address);
		return 0;
	default:
		return -EINVAL;
	}
}

int pci_generic_mmap_read_config(
	const struct udevice *bus,
	int (*addr_f)(const struct udevice *bus, pci_dev_t bdf, uint offset,
		      void **addrp),
	pci_dev_t bdf,
	uint offset,
	ulong *valuep,
	enum pci_size_t size)
{
	void *address;

	if (addr_f(bus, bdf, offset, &address) < 0) {
		*valuep = pci_get_ff(size);
		return 0;
	}

	switch (size) {
	case PCI_SIZE_8:
		*valuep = readb(address);
		return 0;
	case PCI_SIZE_16:
		*valuep = readw(address);
		return 0;
	case PCI_SIZE_32:
		*valuep = readl(address);
		return 0;
	default:
		return -EINVAL;
	}
}
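/*
 * Usage sketch (hypothetical controller driver): a memory-mapped (ECAM)
 * controller can build its read_config()/write_config() ops on the two
 * helpers above by supplying an addr_f callback that translates bdf/offset
 * into a config-space address. "my_ecam_base" (a u8 * to the mapped ECAM
 * window) and the function names are assumptions for illustration:
 *
 *	static int my_conf_address(const struct udevice *bus, pci_dev_t bdf,
 *				   uint offset, void **addrp)
 *	{
 *		*addrp = my_ecam_base + (PCI_BUS(bdf) << 20) +
 *			 (PCI_DEV(bdf) << 15) + (PCI_FUNC(bdf) << 12) + offset;
 *
 *		return 0;
 *	}
 *
 *	static int my_read_config(const struct udevice *bus, pci_dev_t bdf,
 *				  uint offset, ulong *valuep,
 *				  enum pci_size_t size)
 *	{
 *		return pci_generic_mmap_read_config(bus, my_conf_address,
 *						    bdf, offset, valuep, size);
 *	}
 */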
int dm_pci_hose_probe_bus(struct udevice *bus)
{
	int sub_bus;
	int ret;
	int ea_pos;
	u8 reg;

	debug("%s\n", __func__);

	ea_pos = dm_pci_find_capability(bus, PCI_CAP_ID_EA);
	if (ea_pos) {
		dm_pci_read_config8(bus, ea_pos + sizeof(u32) + sizeof(u8),
				    &reg);
		sub_bus = reg;
	} else {
		sub_bus = pci_get_bus_max() + 1;
	}
	debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name);
	dm_pciauto_prescan_setup_bridge(bus, sub_bus);

	ret = device_probe(bus);
	if (ret) {
		debug("%s: Cannot probe bus %s: %d\n", __func__, bus->name,
		      ret);
		return ret;
	}

	if (!ea_pos) {
		if (sub_bus != dev_seq(bus)) {
			debug("%s: Internal error, bus '%s' got seq %d, expected %d\n",
			      __func__, bus->name, dev_seq(bus), sub_bus);
			return -EPIPE;
		}
		sub_bus = pci_get_bus_max();
	}
	dm_pciauto_postscan_setup_bridge(bus, sub_bus);

	return sub_bus;
}
/**
 * pci_match_one_id() - Tell if a PCI device matches a PCI device ID structure
 * @id: single PCI device ID structure to match
 * @find: the PCI device ID structure to match against
 *
 * Returns true if the pci_device_id being searched for matches @id,
 * false if there is no match.
 */
static bool pci_match_one_id(const struct pci_device_id *id,
			     const struct pci_device_id *find)
{
	if ((id->vendor == PCI_ANY_ID || id->vendor == find->vendor) &&
	    (id->device == PCI_ANY_ID || id->device == find->device) &&
	    (id->subvendor == PCI_ANY_ID || id->subvendor == find->subvendor) &&
	    (id->subdevice == PCI_ANY_ID || id->subdevice == find->subdevice) &&
	    !((id->class ^ find->class) & id->class_mask))
		return true;

	return false;
}
/**
 * pci_find_and_bind_driver() - Find and bind the right PCI driver
 *
 * This only looks at certain fields in the descriptor.
 *
 * @parent: Parent bus
 * @find_id: Specification of the driver to find
 * @bdf: Bus/device/function address - see PCI_BDF()
 * @devp: Returns a pointer to the device created
 * @return 0 if OK, -EPERM if the device is not needed before relocation and
 *	   therefore was not created, other -ve value on error
 */
static int pci_find_and_bind_driver(struct udevice *parent,
				    struct pci_device_id *find_id,
				    pci_dev_t bdf, struct udevice **devp)
{
	struct pci_driver_entry *start, *entry;
	ofnode node = ofnode_null();
	const char *drv;
	int n_ents;
	int ret;
	char name[30], *str;
	bool bridge;

	*devp = NULL;

	debug("%s: Searching for driver: vendor=%x, device=%x\n", __func__,
	      find_id->vendor, find_id->device);

	/* Determine optional OF node */
	if (ofnode_valid(dev_ofnode(parent)))
		pci_dev_find_ofnode(parent, bdf, &node);

	if (ofnode_valid(node) && !ofnode_is_available(node)) {
		debug("%s: Ignoring disabled device\n", __func__);
		return -EPERM;
	}

	start = ll_entry_start(struct pci_driver_entry, pci_driver_entry);
	n_ents = ll_entry_count(struct pci_driver_entry, pci_driver_entry);
	for (entry = start; entry != start + n_ents; entry++) {
		const struct pci_device_id *id;
		struct udevice *dev;
		const struct driver *drv;

		for (id = entry->match;
		     id->vendor || id->subvendor || id->class_mask;
		     id++) {
			if (!pci_match_one_id(id, find_id))
				continue;

			drv = entry->driver;

			/*
			 * In the pre-relocation phase, we only bind devices
			 * whose driver has the DM_FLAG_PRE_RELOC flag set, to
			 * save precious memory space, as on some platforms
			 * that space is pretty limited (e.g. when using Cache
			 * As RAM).
			 */
			if (!(gd->flags & GD_FLG_RELOC) &&
			    !(drv->flags & DM_FLAG_PRE_RELOC))
				return -EPERM;

			/*
			 * We could pass the descriptor to the driver as
			 * plat (instead of NULL) and allow its bind()
			 * method to return -ENOENT if it doesn't support this
			 * device. That way we could continue the search to
			 * find another driver. For now this doesn't seem
			 * necessary, so just bind the first match.
			 */
			ret = device_bind(parent, drv, drv->name, NULL, node,
					  &dev);
			if (ret)
				goto error;
			debug("%s: Match found: %s\n", __func__, drv->name);
			dev->driver_data = id->driver_data;
			*devp = dev;
			return 0;
		}
	}

	bridge = (find_id->class >> 8) == PCI_CLASS_BRIDGE_PCI;
	/*
	 * In the pre-relocation phase, we only bind bridge devices, to save
	 * precious memory space, as on some platforms that space is pretty
	 * limited (e.g. when using Cache As RAM).
	 */
	if (!(gd->flags & GD_FLG_RELOC) && !bridge)
		return -EPERM;

	/* Bind a generic driver so that the device can be used */
	sprintf(name, "pci_%x:%x.%x", dev_seq(parent), PCI_DEV(bdf),
		PCI_FUNC(bdf));
	str = strdup(name);
	if (!str)
		return -ENOMEM;
	drv = bridge ? "pci_bridge_drv" : "pci_generic_drv";

	ret = device_bind_driver_to_node(parent, drv, str, node, devp);
	if (ret) {
		debug("%s: Failed to bind generic driver: %d\n", __func__, ret);
		free(str);
		return ret;
	}
	debug("%s: No match found: bound generic driver instead\n", __func__);

	return 0;

error:
	debug("%s: No match found: error %d\n", __func__, ret);
	return ret;
}
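/*
 * Usage sketch: the pci_driver_entry list walked above is populated by
 * drivers declaring a match table with U_BOOT_PCI_DEVICE(); PCI_ANY_ID
 * fields act as wildcards in pci_match_one_id(). "mydrv" and the IDs are
 * hypothetical:
 *
 *	static struct pci_device_id mydrv_supported[] = {
 *		{ PCI_DEVICE(0x1234, 0x5678) },
 *		{ },
 *	};
 *
 *	U_BOOT_PCI_DEVICE(mydrv, mydrv_supported);
 */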
int pci_bind_bus_devices(struct udevice *bus)
{
	ulong vendor, device;
	ulong header_type;
	pci_dev_t bdf, end;
	bool found_multi;
	int ari_off;
	int ret;

	found_multi = false;
	end = PCI_BDF(dev_seq(bus), PCI_MAX_PCI_DEVICES - 1,
		      PCI_MAX_PCI_FUNCTIONS - 1);
	for (bdf = PCI_BDF(dev_seq(bus), 0, 0); bdf <= end;
	     bdf += PCI_BDF(0, 0, 1)) {
		struct pci_child_plat *pplat;
		struct udevice *dev;
		ulong class;

		if (!PCI_FUNC(bdf))
			found_multi = false;
		if (PCI_FUNC(bdf) && !found_multi)
			continue;

		/* Check only the first access, we don't expect problems */
		ret = pci_bus_read_config(bus, bdf, PCI_VENDOR_ID, &vendor,
					  PCI_SIZE_16);
		if (ret)
			goto error;

		if (vendor == 0xffff || vendor == 0x0000)
			continue;

		pci_bus_read_config(bus, bdf, PCI_HEADER_TYPE,
				    &header_type, PCI_SIZE_8);

		if (!PCI_FUNC(bdf))
			found_multi = header_type & 0x80;

		debug("%s: bus %d/%s: found device %x, function %d", __func__,
		      dev_seq(bus), bus->name, PCI_DEV(bdf), PCI_FUNC(bdf));
		pci_bus_read_config(bus, bdf, PCI_DEVICE_ID, &device,
				    PCI_SIZE_16);
		pci_bus_read_config(bus, bdf, PCI_CLASS_REVISION, &class,
				    PCI_SIZE_32);
		class >>= 8;

		/* Find this device in the device tree */
		ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev);
		debug(": find ret=%d\n", ret);

		/* If nothing in the device tree, bind a device */
		if (ret == -ENODEV) {
			struct pci_device_id find_id;
			ulong val;

			memset(&find_id, '\0', sizeof(find_id));
			find_id.vendor = vendor;
			find_id.device = device;
			find_id.class = class;
			if ((header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL) {
				pci_bus_read_config(bus, bdf,
						    PCI_SUBSYSTEM_VENDOR_ID,
						    &val, PCI_SIZE_32);
				find_id.subvendor = val & 0xffff;
				find_id.subdevice = val >> 16;
			}
			ret = pci_find_and_bind_driver(bus, &find_id, bdf,
						       &dev);
		}
		if (ret == -EPERM)
			continue;
		else if (ret)
			return ret;

		/* Update the platform data */
		pplat = dev_get_parent_plat(dev);
		pplat->devfn = PCI_MASK_BUS(bdf);
		pplat->vendor = vendor;
		pplat->device = device;
		pplat->class = class;

		if (IS_ENABLED(CONFIG_PCI_ARID)) {
			ari_off = dm_pci_find_ext_capability(dev,
							     PCI_EXT_CAP_ID_ARI);
			if (ari_off) {
				u16 ari_cap;

				/*
				 * Read the Next Function number in the ARI
				 * Capability Register
				 */
				dm_pci_read_config16(dev, ari_off + 4,
						     &ari_cap);
				/*
				 * Continue the scan at that function number;
				 * subtract one function (0x100 in the BDF) to
				 * compensate for the loop increment.
				 */
				if (ari_cap & 0xff00) {
					bdf = PCI_BDF(PCI_BUS(bdf),
						      PCI_DEV(ari_cap),
						      PCI_FUNC(ari_cap));
					bdf = bdf - 0x100;
				}
			}
		}
	}

	return 0;
error:
	printf("Cannot read bus configuration: %d\n", ret);

	return ret;
}
static void decode_regions(struct pci_controller *hose, ofnode parent_node,
			   ofnode node)
{
	int pci_addr_cells, addr_cells, size_cells;
	int cells_per_record;
	struct bd_info *bd;
	const u32 *prop;
	int max_regions;
	int len;
	int i;

	prop = ofnode_get_property(node, "ranges", &len);
	if (!prop) {
		debug("%s: Cannot decode regions\n", __func__);
		return;
	}

	pci_addr_cells = ofnode_read_simple_addr_cells(node);
	addr_cells = ofnode_read_simple_addr_cells(parent_node);
	size_cells = ofnode_read_simple_size_cells(node);

	/* PCI addresses are always 3-cells */
	len /= sizeof(u32);
	cells_per_record = pci_addr_cells + addr_cells + size_cells;
	hose->region_count = 0;
	debug("%s: len=%d, cells_per_record=%d\n", __func__, len,
	      cells_per_record);

	/* Dynamically allocate the regions array */
	max_regions = len / cells_per_record + CONFIG_NR_DRAM_BANKS;
	hose->regions = (struct pci_region *)
		calloc(1, max_regions * sizeof(struct pci_region));

	for (i = 0; i < max_regions; i++, len -= cells_per_record) {
		u64 pci_addr, addr, size;
		int space_code;
		u32 flags;
		int type;
		int pos;
		int j;

		if (len < cells_per_record)
			break;
		flags = fdt32_to_cpu(prop[0]);
		space_code = (flags >> 24) & 3;
		pci_addr = fdtdec_get_number(prop + 1, 2);
		prop += pci_addr_cells;
		addr = fdtdec_get_number(prop, addr_cells);
		prop += addr_cells;
		size = fdtdec_get_number(prop, size_cells);
		prop += size_cells;
		debug("%s: region %d, pci_addr=%llx, addr=%llx, size=%llx, space_code=%d\n",
		      __func__, hose->region_count, pci_addr, addr, size,
		      space_code);
		if (space_code & 2) {
			type = flags & (1U << 30) ? PCI_REGION_PREFETCH :
				PCI_REGION_MEM;
		} else if (space_code & 1) {
			type = PCI_REGION_IO;
		} else {
			continue;
		}

		if (!IS_ENABLED(CONFIG_SYS_PCI_64BIT) &&
		    type == PCI_REGION_MEM && upper_32_bits(pci_addr)) {
			debug(" - beyond the 32-bit boundary, ignoring\n");
			continue;
		}

		pos = -1;
		if (!IS_ENABLED(CONFIG_PCI_REGION_MULTI_ENTRY)) {
			for (j = 0; j < hose->region_count; j++) {
				if (hose->regions[j].flags == type)
					pos = j;
			}
		}
		if (pos == -1)
			pos = hose->region_count++;
		debug(" - type=%d, pos=%d\n", type, pos);
		pci_set_region(hose->regions + pos, pci_addr, addr, size, type);
	}

	/* Add a region for our local memory */
	bd = gd->bd;
	if (!bd)
		return;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; ++i) {
		if (bd->bi_dram[i].size) {
			pci_set_region(hose->regions + hose->region_count++,
				       bd->bi_dram[i].start,
				       bd->bi_dram[i].start,
				       bd->bi_dram[i].size,
				       PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
		}
	}
}
static int pci_uclass_pre_probe(struct udevice *bus)
{
	struct pci_controller *hose;

	debug("%s, bus=%d/%s, parent=%s\n", __func__, dev_seq(bus), bus->name,
	      bus->parent->name);
	hose = bus->uclass_priv;

	/* For bridges, use the top-level PCI controller */
	if (!device_is_on_pci_bus(bus)) {
		hose->ctlr = bus;
		decode_regions(hose, dev_ofnode(bus->parent), dev_ofnode(bus));
	} else {
		struct pci_controller *parent_hose;

		parent_hose = dev_get_uclass_priv(bus->parent);
		hose->ctlr = parent_hose->bus;
	}
	hose->bus = bus;
	hose->first_busno = dev_seq(bus);
	hose->last_busno = dev_seq(bus);
	if (dev_of_valid(bus)) {
		hose->skip_auto_config_until_reloc =
			dev_read_bool(bus,
				      "u-boot,skip-auto-config-until-reloc");
	}

	return 0;
}
static int pci_uclass_post_probe(struct udevice *bus)
{
	struct pci_controller *hose = dev_get_uclass_priv(bus);
	int ret;

	debug("%s: probing bus %d\n", __func__, dev_seq(bus));
	ret = pci_bind_bus_devices(bus);
	if (ret)
		return ret;

	if (CONFIG_IS_ENABLED(PCI_PNP) && ll_boot_init() &&
	    (!hose->skip_auto_config_until_reloc ||
	     (gd->flags & GD_FLG_RELOC))) {
		ret = pci_auto_config_devices(bus);
		if (ret < 0)
			return log_msg_ret("pci auto-config", ret);
	}

#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
	/*
	 * Per the Intel FSP specification, we should call the FSP notify API
	 * to inform FSP that PCI enumeration has been done, so that FSP will
	 * do any necessary initialization as required by the chipset's
	 * BIOS Writer's Guide (BWG).
	 *
	 * Unfortunately we have to put this call here as with driver model,
	 * the enumeration is all done on a lazy basis as needed, so until
	 * something is touched on PCI it won't happen.
	 *
	 * Note we only call this 1) after U-Boot is relocated, and 2) the
	 * root bus has finished probing.
	 */
	if ((gd->flags & GD_FLG_RELOC) && dev_seq(bus) == 0 && ll_boot_init()) {
		ret = fsp_init_phase_pci();
		if (ret)
			return ret;
	}
#endif

	return 0;
}
static int pci_uclass_child_post_bind(struct udevice *dev)
{
	struct pci_child_plat *pplat;

	if (!dev_of_valid(dev))
		return 0;

	pplat = dev_get_parent_plat(dev);

	/* Extract vendor id and device id if available */
	ofnode_read_pci_vendev(dev_ofnode(dev), &pplat->vendor, &pplat->device);

	/* Extract the devfn from fdt_pci_addr */
	pplat->devfn = pci_get_devfn(dev);

	return 0;
}
static int pci_bridge_read_config(const struct udevice *bus, pci_dev_t bdf,
				  uint offset, ulong *valuep,
				  enum pci_size_t size)
{
	struct pci_controller *hose = bus->uclass_priv;

	return pci_bus_read_config(hose->ctlr, bdf, offset, valuep, size);
}

static int pci_bridge_write_config(struct udevice *bus, pci_dev_t bdf,
				   uint offset, ulong value,
				   enum pci_size_t size)
{
	struct pci_controller *hose = bus->uclass_priv;

	return pci_bus_write_config(hose->ctlr, bdf, offset, value, size);
}
static int skip_to_next_device(struct udevice *bus, struct udevice **devp)
{
	struct udevice *dev;
	int ret = 0;

	/*
	 * Scan through all the PCI controllers. On x86 there will only be one
	 * but that is not necessarily true on other hardware.
	 */
	do {
		device_find_first_child(bus, &dev);
		if (dev) {
			*devp = dev;
			return 0;
		}
		ret = uclass_next_device(&bus);
		if (ret)
			return ret;
	} while (bus);

	return 0;
}
int pci_find_next_device(struct udevice **devp)
{
	struct udevice *child = *devp;
	struct udevice *bus = child->parent;
	int ret;

	/* First try all the siblings */
	*devp = NULL;
	while (child) {
		device_find_next_child(&child);
		if (child) {
			*devp = child;
			return 0;
		}
	}

	/* We ran out of siblings. Try the next bus */
	ret = uclass_next_device(&bus);
	if (ret)
		return ret;

	return bus ? skip_to_next_device(bus, devp) : 0;
}

int pci_find_first_device(struct udevice **devp)
{
	struct udevice *bus;
	int ret;

	*devp = NULL;
	ret = uclass_first_device(UCLASS_PCI, &bus);
	if (ret)
		return ret;

	return skip_to_next_device(bus, devp);
}
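/*
 * Usage sketch: together these two functions allow iterating over every PCI
 * device on every bus, as dm_pci_find_class() does above:
 *
 *	struct udevice *dev;
 *
 *	for (pci_find_first_device(&dev); dev; pci_find_next_device(&dev))
 *		do_something(dev);	(hypothetical per-device hook)
 */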
ulong pci_conv_32_to_size(ulong value, uint offset, enum pci_size_t size)
{
	switch (size) {
	case PCI_SIZE_8:
		return (value >> ((offset & 3) * 8)) & 0xff;
	case PCI_SIZE_16:
		return (value >> ((offset & 2) * 8)) & 0xffff;
	default:
		return value;
	}
}

ulong pci_conv_size_to_32(ulong old, ulong value, uint offset,
			  enum pci_size_t size)
{
	uint off_mask;
	uint val_mask, shift;
	ulong ldata, mask;

	switch (size) {
	case PCI_SIZE_8:
		off_mask = 3;
		val_mask = 0xff;
		break;
	case PCI_SIZE_16:
		off_mask = 2;
		val_mask = 0xffff;
		break;
	default:
		return value;
	}
	shift = (offset & off_mask) * 8;
	ldata = (value & val_mask) << shift;
	mask = val_mask << shift;
	value = (old & ~mask) | ldata;

	return value;
}
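/*
 * Worked example: an 8-bit config read at offset 0x3d is serviced by a
 * 32-bit read of the aligned dword at 0x3c. If that dword is 0xaabbccdd,
 * pci_conv_32_to_size(0xaabbccdd, 0x3d, PCI_SIZE_8) shifts right by
 * (0x3d & 3) * 8 = 8 bits and masks with 0xff, returning 0xcc.
 */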
int pci_get_dma_regions(struct udevice *dev, struct pci_region *memp, int index)
{
	int pci_addr_cells, addr_cells, size_cells;
	int cells_per_record;
	const u32 *prop;
	int len;
	int i = 0;

	prop = ofnode_get_property(dev_ofnode(dev), "dma-ranges", &len);
	if (!prop) {
		log_err("PCI: Device '%s': Cannot decode dma-ranges\n",
			dev->name);
		return -EINVAL;
	}

	pci_addr_cells = ofnode_read_simple_addr_cells(dev_ofnode(dev));
	addr_cells = ofnode_read_simple_addr_cells(dev_ofnode(dev->parent));
	size_cells = ofnode_read_simple_size_cells(dev_ofnode(dev));

	/* PCI addresses are always 3-cells */
	len /= sizeof(u32);
	cells_per_record = pci_addr_cells + addr_cells + size_cells;
	debug("%s: len=%d, cells_per_record=%d\n", __func__, len,
	      cells_per_record);

	while (len) {
		memp->bus_start = fdtdec_get_number(prop + 1, 2);
		prop += pci_addr_cells;
		memp->phys_start = fdtdec_get_number(prop, addr_cells);
		prop += addr_cells;
		memp->size = fdtdec_get_number(prop, size_cells);
		prop += size_cells;

		if (i == index)
			return 0;
		i++;
		len -= cells_per_record;
	}

	return -EINVAL;
}
int pci_get_regions(struct udevice *dev, struct pci_region **iop,
		    struct pci_region **memp, struct pci_region **prefp)
{
	struct udevice *bus = pci_get_controller(dev);
	struct pci_controller *hose = dev_get_uclass_priv(bus);
	int i;

	*iop = NULL;
	*memp = NULL;
	*prefp = NULL;
	for (i = 0; i < hose->region_count; i++) {
		switch (hose->regions[i].flags) {
		case PCI_REGION_IO:
			if (!*iop || (*iop)->size < hose->regions[i].size)
				*iop = hose->regions + i;
			break;
		case PCI_REGION_MEM:
			if (!*memp || (*memp)->size < hose->regions[i].size)
				*memp = hose->regions + i;
			break;
		case (PCI_REGION_MEM | PCI_REGION_PREFETCH):
			if (!*prefp || (*prefp)->size < hose->regions[i].size)
				*prefp = hose->regions + i;
			break;
		}
	}

	return (*iop != NULL) + (*memp != NULL) + (*prefp != NULL);
}
u32 dm_pci_read_bar32(const struct udevice *dev, int barnum)
{
	u32 addr;
	int bar;

	bar = PCI_BASE_ADDRESS_0 + barnum * 4;
	dm_pci_read_config32(dev, bar, &addr);

	/*
	 * If we get an invalid address, return this so that comparisons with
	 * FDT_ADDR_T_NONE work correctly
	 */
	if (addr == 0xffffffff)
		return addr;
	else if (addr & PCI_BASE_ADDRESS_SPACE_IO)
		return addr & PCI_BASE_ADDRESS_IO_MASK;
	else
		return addr & PCI_BASE_ADDRESS_MEM_MASK;
}

void dm_pci_write_bar32(struct udevice *dev, int barnum, u32 addr)
{
	int bar;

	bar = PCI_BASE_ADDRESS_0 + barnum * 4;
	dm_pci_write_config32(dev, bar, addr);
}
static int _dm_pci_bus_to_phys(struct udevice *ctlr,
			       pci_addr_t bus_addr, unsigned long flags,
			       unsigned long skip_mask, phys_addr_t *pa)
{
	struct pci_controller *hose = dev_get_uclass_priv(ctlr);
	struct pci_region *res;
	int i;

	if (hose->region_count == 0) {
		*pa = bus_addr;
		return 0;
	}

	for (i = 0; i < hose->region_count; i++) {
		res = &hose->regions[i];

		if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0)
			continue;

		if (res->flags & skip_mask)
			continue;

		if (bus_addr >= res->bus_start &&
		    (bus_addr - res->bus_start) < res->size) {
			*pa = (bus_addr - res->bus_start + res->phys_start);
			return 0;
		}
	}

	return 1;
}

phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t bus_addr,
			       unsigned long flags)
{
	phys_addr_t phys_addr = 0;
	struct udevice *ctlr;
	int ret;

	/* The root controller has the region information */
	ctlr = pci_get_controller(dev);

	/*
	 * if PCI_REGION_MEM is set we do a two pass search with preference
	 * on matches that don't have PCI_REGION_SYS_MEMORY set
	 */
	if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) {
		ret = _dm_pci_bus_to_phys(ctlr, bus_addr,
					  flags, PCI_REGION_SYS_MEMORY,
					  &phys_addr);
		if (!ret)
			return phys_addr;
	}

	ret = _dm_pci_bus_to_phys(ctlr, bus_addr, flags, 0, &phys_addr);
	if (ret)
		puts("pci_hose_bus_to_phys: invalid physical address\n");

	return phys_addr;
}
static int _dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
			       unsigned long flags, unsigned long skip_mask,
			       pci_addr_t *ba)
{
	struct pci_region *res;
	struct udevice *ctlr;
	pci_addr_t bus_addr;
	int i;
	struct pci_controller *hose;

	/* The root controller has the region information */
	ctlr = pci_get_controller(dev);
	hose = dev_get_uclass_priv(ctlr);

	if (hose->region_count == 0) {
		*ba = phys_addr;
		return 0;
	}

	for (i = 0; i < hose->region_count; i++) {
		res = &hose->regions[i];

		if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0)
			continue;

		if (res->flags & skip_mask)
			continue;

		bus_addr = phys_addr - res->phys_start + res->bus_start;

		if (bus_addr >= res->bus_start &&
		    (bus_addr - res->bus_start) < res->size) {
			*ba = bus_addr;
			return 0;
		}
	}

	return 1;
}

pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
			      unsigned long flags)
{
	pci_addr_t bus_addr = 0;
	int ret;

	/*
	 * if PCI_REGION_MEM is set we do a two pass search with preference
	 * on matches that don't have PCI_REGION_SYS_MEMORY set
	 */
	if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM) {
		ret = _dm_pci_phys_to_bus(dev, phys_addr, flags,
					  PCI_REGION_SYS_MEMORY, &bus_addr);
		if (!ret)
			return bus_addr;
	}

	ret = _dm_pci_phys_to_bus(dev, phys_addr, flags, 0, &bus_addr);
	if (ret)
		puts("pci_hose_phys_to_bus: invalid physical address\n");

	return bus_addr;
}
static phys_addr_t dm_pci_map_ea_virt(struct udevice *dev, int ea_off,
				      struct pci_child_plat *pdata)
{
	phys_addr_t addr = 0;

	/*
	 * In the case of a Virtual Function device using BAR
	 * base and size, add offset for VFn BAR(1, 2, 3...n)
	 */
	if (pdata->is_virtfn) {
		size_t sz;
		u32 ea_entry;

		/* MaxOffset, 1st DW */
		dm_pci_read_config32(dev, ea_off + 8, &ea_entry);
		sz = ea_entry & PCI_EA_FIELD_MASK;
		/* Fill up lower 2 bits */
		sz |= (~PCI_EA_FIELD_MASK);

		if (ea_entry & PCI_EA_IS_64) {
			/* MaxOffset 2nd DW */
			dm_pci_read_config32(dev, ea_off + 16, &ea_entry);
			sz |= ((u64)ea_entry) << 32;
		}

		addr = (pdata->virtid - 1) * (sz + 1);
	}

	return addr;
}
static void *dm_pci_map_ea_bar(struct udevice *dev, int bar, int flags,
			       int ea_off, struct pci_child_plat *pdata)
{
	int ea_cnt, i, entry_size;
	int bar_id = (bar - PCI_BASE_ADDRESS_0) >> 2;
	u32 ea_entry;
	phys_addr_t addr;

	if (IS_ENABLED(CONFIG_PCI_SRIOV)) {
		/*
		 * For a Virtual Function, @dev is the Physical Function's
		 * device, so @pdata points at the required VF-specific data.
		 */
		if (pdata->is_virtfn)
			bar_id += PCI_EA_BEI_VF_BAR0;
	}

	/* EA capability structure header */
	dm_pci_read_config32(dev, ea_off, &ea_entry);
	ea_cnt = (ea_entry >> 16) & PCI_EA_NUM_ENT_MASK;
	ea_off += PCI_EA_FIRST_ENT;

	for (i = 0; i < ea_cnt; i++, ea_off += entry_size) {
		/* Entry header */
		dm_pci_read_config32(dev, ea_off, &ea_entry);
		entry_size = ((ea_entry & PCI_EA_ES) + 1) << 2;

		if (((ea_entry & PCI_EA_BEI) >> 4) != bar_id)
			continue;

		/* Base address, 1st DW */
		dm_pci_read_config32(dev, ea_off + 4, &ea_entry);
		addr = ea_entry & PCI_EA_FIELD_MASK;
		if (ea_entry & PCI_EA_IS_64) {
			/* Base address, 2nd DW, skip over 4B MaxOffset */
			dm_pci_read_config32(dev, ea_off + 12, &ea_entry);
			addr |= ((u64)ea_entry) << 32;
		}

		if (IS_ENABLED(CONFIG_PCI_SRIOV))
			addr += dm_pci_map_ea_virt(dev, ea_off, pdata);

		/* size ignored for now */
		return map_physmem(addr, 0, flags);
	}

	return NULL;
}
void *dm_pci_map_bar(struct udevice *dev, int bar, int flags)
{
	struct pci_child_plat *pdata = dev_get_parent_plat(dev);
	struct udevice *udev = dev;
	pci_addr_t pci_bus_addr;
	u32 bar_response;
	int ea_off;

	if (IS_ENABLED(CONFIG_PCI_SRIOV)) {
		/*
		 * In case of Virtual Function devices, use the PF udevice,
		 * as the EA capability is defined in the Physical Function
		 */
		if (pdata->is_virtfn)
			udev = pdata->pfdev;
	}

	/*
	 * If the function supports Enhanced Allocation, use that instead of
	 * BARs.
	 * In case of virtual functions, pdata will help read the VF BEI
	 * and EA entry size.
	 */
	ea_off = dm_pci_find_capability(udev, PCI_CAP_ID_EA);
	if (ea_off)
		return dm_pci_map_ea_bar(udev, bar, flags, ea_off, pdata);

	/* Read the BAR address */
	dm_pci_read_config32(udev, bar, &bar_response);
	pci_bus_addr = (pci_addr_t)(bar_response & ~0xf);

	/*
	 * Pass "0" as the length argument to pci_bus_to_virt. The arg
	 * isn't actually used on any platform because U-Boot assumes a static
	 * linear mapping. In the future, this could read the BAR size
	 * and pass that as the size if needed.
	 */
	return dm_pci_bus_to_virt(udev, pci_bus_addr, flags, 0, MAP_NOCACHE);
}
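/*
 * Usage sketch: a driver would typically map BAR0 of a memory-space device
 * from its probe() method; the error handling shown is illustrative:
 *
 *	void *regs = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
 *	if (!regs)
 *		return -EINVAL;
 */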
static int _dm_pci_find_next_capability(struct udevice *dev, u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;
	u8 id;
	u16 ent;

	dm_pci_read_config8(dev, pos, &pos);

	while (ttl--) {
		if (pos < PCI_STD_HEADER_SIZEOF)
			break;
		pos &= ~3;
		dm_pci_read_config16(dev, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}

	return 0;
}

int dm_pci_find_next_capability(struct udevice *dev, u8 start, int cap)
{
	return _dm_pci_find_next_capability(dev, start + PCI_CAP_LIST_NEXT,
					    cap);
}
int dm_pci_find_capability(struct udevice *dev, int cap)
{
	u16 status;
	u8 header_type;
	u8 pos;

	dm_pci_read_config16(dev, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	dm_pci_read_config8(dev, PCI_HEADER_TYPE, &header_type);
	if ((header_type & 0x7f) == PCI_HEADER_TYPE_CARDBUS)
		pos = PCI_CB_CAPABILITY_LIST;
	else
		pos = PCI_CAPABILITY_LIST;

	return _dm_pci_find_next_capability(dev, pos, cap);
}
int dm_pci_find_next_ext_capability(struct udevice *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (start)
		pos = start;

	dm_pci_read_config32(dev, pos, &header);
	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl--) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		dm_pci_read_config32(dev, pos, &header);
	}

	return 0;
}

int dm_pci_find_ext_capability(struct udevice *dev, int cap)
{
	return dm_pci_find_next_ext_capability(dev, 0, cap);
}
int dm_pci_flr(struct udevice *dev)
{
	int pcie_off;
	u32 cap;

	/* look for the PCI Express Capability */
	pcie_off = dm_pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!pcie_off)
		return -ENOENT;

	/* check FLR capability */
	dm_pci_read_config32(dev, pcie_off + PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return -ENOENT;

	dm_pci_clrset_config16(dev, pcie_off + PCI_EXP_DEVCTL, 0,
			       PCI_EXP_DEVCTL_BCR_FLR);

	/* wait 100ms, per PCI spec */
	mdelay(100);

	return 0;
}
#if defined(CONFIG_PCI_SRIOV)
int pci_sriov_init(struct udevice *pdev, int vf_en)
{
	u16 vendor, device;
	struct udevice *bus;
	struct udevice *dev;
	pci_dev_t bdf;
	u16 ctrl;
	u16 num_vfs;
	u16 total_vf;
	u16 vf_offset;
	u16 vf_stride;
	int vf, ret;
	int pos;

	pos = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		debug("Error: SRIOV capability not found\n");
		return -ENOENT;
	}

	dm_pci_read_config16(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
	dm_pci_read_config16(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf);
	if (vf_en > total_vf)
		vf_en = total_vf;
	dm_pci_write_config16(pdev, pos + PCI_SRIOV_NUM_VF, vf_en);

	ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
	dm_pci_write_config16(pdev, pos + PCI_SRIOV_CTRL, ctrl);

	dm_pci_read_config16(pdev, pos + PCI_SRIOV_NUM_VF, &num_vfs);
	if (num_vfs > vf_en)
		num_vfs = vf_en;

	dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_OFFSET, &vf_offset);
	dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_STRIDE, &vf_stride);

	dm_pci_read_config16(pdev, PCI_VENDOR_ID, &vendor);
	dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_DID, &device);

	bdf = dm_pci_get_bdf(pdev);

	pci_get_bus(PCI_BUS(bdf), &bus);

	if (!bus)
		return -ENODEV;

	bdf += PCI_BDF(0, 0, vf_offset);

	for (vf = 0; vf < num_vfs; vf++) {
		struct pci_child_plat *pplat;
		ulong class;

		pci_bus_read_config(bus, bdf, PCI_CLASS_DEVICE,
				    &class, PCI_SIZE_16);

		debug("%s: bus %d/%s: found VF %x:%x\n", __func__,
		      dev_seq(bus), bus->name, PCI_DEV(bdf), PCI_FUNC(bdf));

		/* Find this device in the device tree */
		ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev);

		if (ret == -ENODEV) {
			struct pci_device_id find_id;

			memset(&find_id, '\0', sizeof(find_id));
			find_id.vendor = vendor;
			find_id.device = device;
			find_id.class = class;

			ret = pci_find_and_bind_driver(bus, &find_id,
						       bdf, &dev);
			if (ret)
				return ret;
		}

		/* Update the platform data */
		pplat = dev_get_parent_plat(dev);
		pplat->devfn = PCI_MASK_BUS(bdf);
		pplat->vendor = vendor;
		pplat->device = device;
		pplat->class = class;
		pplat->is_virtfn = true;
		pplat->pfdev = pdev;
		pplat->virtid = vf * vf_stride + vf_offset;

		debug("%s: bus %d/%s: found VF %x:%x %x:%x class %lx id %x\n",
		      __func__, dev_seq(dev), dev->name, PCI_DEV(bdf),
		      PCI_FUNC(bdf), vendor, device, class, pplat->virtid);
		bdf += PCI_BDF(0, 0, vf_stride);
	}

	return 0;
}
int pci_sriov_get_totalvfs(struct udevice *pdev)
{
	u16 total_vf;
	int pos;

	pos = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		debug("Error: SRIOV capability not found\n");
		return -ENOENT;
	}

	dm_pci_read_config16(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf);

	return total_vf;
}
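/*
 * Usage sketch: a PF driver might enable every VF the device advertises;
 * the "ret" handling is illustrative:
 *
 *	int total = pci_sriov_get_totalvfs(pdev);
 *
 *	if (total > 0)
 *		ret = pci_sriov_init(pdev, total);
 */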
#endif /* SRIOV */
UCLASS_DRIVER(pci) = {
	.id		= UCLASS_PCI,
	.name		= "pci",
	.flags		= DM_UC_FLAG_SEQ_ALIAS,
	.post_bind	= dm_scan_fdt_dev,
	.pre_probe	= pci_uclass_pre_probe,
	.post_probe	= pci_uclass_post_probe,
	.child_post_bind = pci_uclass_child_post_bind,
	.per_device_auto	= sizeof(struct pci_controller),
	.per_child_plat_auto	= sizeof(struct pci_child_plat),
};

static const struct dm_pci_ops pci_bridge_ops = {
	.read_config	= pci_bridge_read_config,
	.write_config	= pci_bridge_write_config,
};

static const struct udevice_id pci_bridge_ids[] = {
	{ .compatible = "pci-bridge" },
	{ }
};

U_BOOT_DRIVER(pci_bridge_drv) = {
	.name		= "pci_bridge_drv",
	.id		= UCLASS_PCI,
	.of_match	= pci_bridge_ids,
	.ops		= &pci_bridge_ops,
};

UCLASS_DRIVER(pci_generic) = {
	.id		= UCLASS_PCI_GENERIC,
	.name		= "pci_generic",
};

static const struct udevice_id pci_generic_ids[] = {
	{ .compatible = "pci-generic" },
	{ }
};

U_BOOT_DRIVER(pci_generic_drv) = {
	.name		= "pci_generic_drv",
	.id		= UCLASS_PCI_GENERIC,
	.of_match	= pci_generic_ids,
};
void pci_init(void)
{
	struct udevice *bus;

	/*
	 * Enumerate all known controller devices. Enumeration has the side-
	 * effect of probing them, so PCIe devices will be enumerated too.
	 */
	for (uclass_first_device_check(UCLASS_PCI, &bus);
	     bus;
	     uclass_next_device_check(&bus)) {
		;
	}
}