device.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Device manager
 *
 * Copyright (c) 2013 Google, Inc
 *
 * (C) Copyright 2012
 * Pavel Herrmann <morpheus.ibis@gmail.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <asm/io.h>
#include <clk.h>
#include <fdtdec.h>
#include <fdt_support.h>
#include <malloc.h>
#include <asm/cache.h>
#include <dm/device.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/of_access.h>
#include <dm/pinctrl.h>
#include <dm/platdata.h>
#include <dm/read.h>
#include <dm/uclass.h>
#include <dm/uclass-internal.h>
#include <dm/util.h>
#include <linux/err.h>
#include <linux/list.h>
#include <power-domain.h>

DECLARE_GLOBAL_DATA_PTR;
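
/*
 * device_bind_common() - Create a device, attach it to its uclass and
 * parent, allocate any requested platform data and run the bind() hooks.
 * On any failure the partially constructed device is torn down again
 * before returning.
 */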
static int device_bind_common(struct udevice *parent, const struct driver *drv,
			      const char *name, void *platdata,
			      ulong driver_data, ofnode node,
			      uint of_platdata_size, struct udevice **devp)
{
	struct udevice *dev;
	struct uclass *uc;
	int size, ret = 0;

	if (devp)
		*devp = NULL;
	if (!name)
		return -EINVAL;

	ret = uclass_get(drv->id, &uc);
	if (ret) {
		debug("Missing uclass for driver %s\n", drv->name);
		return ret;
	}

	dev = calloc(1, sizeof(struct udevice));
	if (!dev)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->sibling_node);
	INIT_LIST_HEAD(&dev->child_head);
	INIT_LIST_HEAD(&dev->uclass_node);
#ifdef CONFIG_DEVRES
	INIT_LIST_HEAD(&dev->devres_head);
#endif

	dev->platdata = platdata;
	dev->driver_data = driver_data;
	dev->name = name;
	dev->node = node;
	dev->parent = parent;
	dev->driver = drv;
	dev->uclass = uc;

	dev->seq = -1;
	dev->req_seq = -1;
	if (CONFIG_IS_ENABLED(DM_SEQ_ALIAS) &&
	    (uc->uc_drv->flags & DM_UC_FLAG_SEQ_ALIAS)) {
		/*
		 * Some devices, such as a SPI bus, I2C bus and serial ports
		 * are numbered using aliases.
		 *
		 * This is just a 'requested' sequence, and will be
		 * resolved (and ->seq updated) when the device is probed.
		 */
		if (CONFIG_IS_ENABLED(OF_CONTROL) &&
		    !CONFIG_IS_ENABLED(OF_PLATDATA)) {
			if (uc->uc_drv->name && ofnode_valid(node))
				dev_read_alias_seq(dev, &dev->req_seq);
#if CONFIG_IS_ENABLED(OF_PRIOR_STAGE)
			if (dev->req_seq == -1)
				dev->req_seq =
					uclass_find_next_free_req_seq(drv->id);
#endif
		} else {
			dev->req_seq = uclass_find_next_free_req_seq(drv->id);
		}
	}

	if (drv->platdata_auto_alloc_size) {
		bool alloc = !platdata;

		if (CONFIG_IS_ENABLED(OF_PLATDATA)) {
			if (of_platdata_size) {
				dev->flags |= DM_FLAG_OF_PLATDATA;
				if (of_platdata_size <
						drv->platdata_auto_alloc_size)
					alloc = true;
			}
		}
		if (alloc) {
			dev->flags |= DM_FLAG_ALLOC_PDATA;
			dev->platdata = calloc(1,
					       drv->platdata_auto_alloc_size);
			if (!dev->platdata) {
				ret = -ENOMEM;
				goto fail_alloc1;
			}
			if (CONFIG_IS_ENABLED(OF_PLATDATA) && platdata) {
				memcpy(dev->platdata, platdata,
				       of_platdata_size);
			}
		}
	}

	size = uc->uc_drv->per_device_platdata_auto_alloc_size;
	if (size) {
		dev->flags |= DM_FLAG_ALLOC_UCLASS_PDATA;
		dev->uclass_platdata = calloc(1, size);
		if (!dev->uclass_platdata) {
			ret = -ENOMEM;
			goto fail_alloc2;
		}
	}

	if (parent) {
		size = parent->driver->per_child_platdata_auto_alloc_size;
		if (!size) {
			size = parent->uclass->uc_drv->
					per_child_platdata_auto_alloc_size;
		}
		if (size) {
			dev->flags |= DM_FLAG_ALLOC_PARENT_PDATA;
			dev->parent_platdata = calloc(1, size);
			if (!dev->parent_platdata) {
				ret = -ENOMEM;
				goto fail_alloc3;
			}
		}
		/* put dev into parent's successor list */
		list_add_tail(&dev->sibling_node, &parent->child_head);
	}

	ret = uclass_bind_device(dev);
	if (ret)
		goto fail_uclass_bind;

	/* if we fail to bind we remove device from successors and free it */
	if (drv->bind) {
		ret = drv->bind(dev);
		if (ret)
			goto fail_bind;
	}
	if (parent && parent->driver->child_post_bind) {
		ret = parent->driver->child_post_bind(dev);
		if (ret)
			goto fail_child_post_bind;
	}
	if (uc->uc_drv->post_bind) {
		ret = uc->uc_drv->post_bind(dev);
		if (ret)
			goto fail_uclass_post_bind;
	}

	if (parent)
		pr_debug("Bound device %s to %s\n", dev->name, parent->name);
	if (devp)
		*devp = dev;

	dev->flags |= DM_FLAG_BOUND;

	return 0;

fail_uclass_post_bind:
	/* There is no child unbind() method, so no clean-up required */
fail_child_post_bind:
	if (CONFIG_IS_ENABLED(DM_DEVICE_REMOVE)) {
		if (drv->unbind && drv->unbind(dev)) {
			dm_warn("unbind() method failed on dev '%s' on error path\n",
				dev->name);
		}
	}

fail_bind:
	if (CONFIG_IS_ENABLED(DM_DEVICE_REMOVE)) {
		if (uclass_unbind_device(dev)) {
			dm_warn("Failed to unbind dev '%s' on error path\n",
				dev->name);
		}
	}
fail_uclass_bind:
	if (CONFIG_IS_ENABLED(DM_DEVICE_REMOVE)) {
		list_del(&dev->sibling_node);
		if (dev->flags & DM_FLAG_ALLOC_PARENT_PDATA) {
			free(dev->parent_platdata);
			dev->parent_platdata = NULL;
		}
	}
fail_alloc3:
	if (dev->flags & DM_FLAG_ALLOC_UCLASS_PDATA) {
		free(dev->uclass_platdata);
		dev->uclass_platdata = NULL;
	}
fail_alloc2:
	if (dev->flags & DM_FLAG_ALLOC_PDATA) {
		free(dev->platdata);
		dev->platdata = NULL;
	}
fail_alloc1:
	devres_release_all(dev);

	free(dev);

	return ret;
}

int device_bind_with_driver_data(struct udevice *parent,
				 const struct driver *drv, const char *name,
				 ulong driver_data, ofnode node,
				 struct udevice **devp)
{
	return device_bind_common(parent, drv, name, NULL, driver_data, node,
				  0, devp);
}

int device_bind(struct udevice *parent, const struct driver *drv,
		const char *name, void *platdata, int of_offset,
		struct udevice **devp)
{
	return device_bind_common(parent, drv, name, platdata, 0,
				  offset_to_ofnode(of_offset), 0, devp);
}

int device_bind_ofnode(struct udevice *parent, const struct driver *drv,
		       const char *name, void *platdata, ofnode node,
		       struct udevice **devp)
{
	return device_bind_common(parent, drv, name, platdata, 0, node, 0,
				  devp);
}

int device_bind_by_name(struct udevice *parent, bool pre_reloc_only,
			const struct driver_info *info, struct udevice **devp)
{
	struct driver *drv;
	uint platdata_size = 0;
	int ret;

	drv = lists_driver_lookup_name(info->name);
	if (!drv)
		return -ENOENT;
	if (pre_reloc_only && !(drv->flags & DM_FLAG_PRE_RELOC))
		return -EPERM;

#if CONFIG_IS_ENABLED(OF_PLATDATA)
	platdata_size = info->platdata_size;
#endif
	ret = device_bind_common(parent, drv, info->name,
				 (void *)info->platdata, 0, ofnode_null(),
				 platdata_size, devp);
	if (ret)
		return ret;

	return ret;
}

int device_reparent(struct udevice *dev, struct udevice *new_parent)
{
	struct udevice *pos, *n;

	assert(dev);
	assert(new_parent);

	list_for_each_entry_safe(pos, n, &dev->parent->child_head,
				 sibling_node) {
		if (pos->driver != dev->driver)
			continue;

		list_del(&dev->sibling_node);
		list_add_tail(&dev->sibling_node, &new_parent->child_head);
		dev->parent = new_parent;

		break;
	}

	return 0;
}
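
/*
 * alloc_priv() - Allocate zeroed private data for a device. If the driver
 * sets DM_FLAG_ALLOC_PRIV_DMA the buffer is cache-aligned and its zeroes
 * are flushed to memory so it is safe to use for DMA.
 */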
static void *alloc_priv(int size, uint flags)
{
	void *priv;

	if (flags & DM_FLAG_ALLOC_PRIV_DMA) {
		size = ROUND(size, ARCH_DMA_MINALIGN);
		priv = memalign(ARCH_DMA_MINALIGN, size);
		if (priv) {
			memset(priv, '\0', size);
			/*
			 * Ensure that the zero bytes are flushed to memory.
			 * This prevents problems if the driver uses this as
			 * both an input and an output buffer:
			 *
			 * 1. Zeroes written to buffer (here) and sit in the
			 *	cache
			 * 2. Driver issues a read command to DMA
			 * 3. CPU runs out of cache space and evicts some cache
			 *	data in the buffer, writing zeroes to RAM from
			 *	the memset() above
			 * 4. DMA completes
			 * 5. Buffer now has some DMA data and some zeroes
			 * 6. Data being read is now incorrect
			 *
			 * To prevent this, ensure that the cache is clean
			 * within this range at the start. The driver can then
			 * use normal flush-after-write, invalidate-before-read
			 * procedures.
			 *
			 * TODO(sjg@chromium.org): Drop this microblaze
			 * exception.
			 */
#ifndef CONFIG_MICROBLAZE
			flush_dcache_range((ulong)priv, (ulong)priv + size);
#endif
		}
	} else {
		priv = calloc(1, size);
	}

	return priv;
}
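
/*
 * device_ofdata_to_platdata() - Ensure the parent's ofdata is processed,
 * allocate the device's private data and then run the driver's
 * ofdata_to_platdata() hook to decode the device-tree node (if any).
 */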
int device_ofdata_to_platdata(struct udevice *dev)
{
	const struct driver *drv;
	int size = 0;
	int ret;

	if (!dev)
		return -EINVAL;

	if (dev->flags & DM_FLAG_PLATDATA_VALID)
		return 0;

	/* Ensure all parents have ofdata */
	if (dev->parent) {
		ret = device_ofdata_to_platdata(dev->parent);
		if (ret)
			goto fail;

		/*
		 * The device might have already been probed during
		 * the call to device_probe() on its parent device
		 * (e.g. PCI bridge devices). Test the flags again
		 * so that we don't mess up the device.
		 */
		if (dev->flags & DM_FLAG_PLATDATA_VALID)
			return 0;
	}

	drv = dev->driver;
	assert(drv);

	/* Allocate private data if requested and not reentered */
	if (drv->priv_auto_alloc_size && !dev->priv) {
		dev->priv = alloc_priv(drv->priv_auto_alloc_size, drv->flags);
		if (!dev->priv) {
			ret = -ENOMEM;
			goto fail;
		}
	}
	/* Allocate private data if requested and not reentered */
	size = dev->uclass->uc_drv->per_device_auto_alloc_size;
	if (size && !dev->uclass_priv) {
		dev->uclass_priv = alloc_priv(size,
					      dev->uclass->uc_drv->flags);
		if (!dev->uclass_priv) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	/* Allocate parent data for this child */
	if (dev->parent) {
		size = dev->parent->driver->per_child_auto_alloc_size;
		if (!size) {
			size = dev->parent->uclass->uc_drv->
					per_child_auto_alloc_size;
		}
		if (size && !dev->parent_priv) {
			dev->parent_priv = alloc_priv(size, drv->flags);
			if (!dev->parent_priv) {
				ret = -ENOMEM;
				goto fail;
			}
		}
	}

	if (drv->ofdata_to_platdata &&
	    (CONFIG_IS_ENABLED(OF_PLATDATA) || dev_has_of_node(dev))) {
		ret = drv->ofdata_to_platdata(dev);
		if (ret)
			goto fail;
	}

	dev->flags |= DM_FLAG_PLATDATA_VALID;

	return 0;
fail:
	device_free(dev);

	return ret;
}
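
/*
 * device_probe() - Activate a device: process its ofdata, probe all of its
 * parents, assign a sequence number, apply pinctrl/power-domain/clock
 * defaults and finally call the driver's probe() method.
 */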
int device_probe(struct udevice *dev)
{
	const struct driver *drv;
	int ret;
	int seq;

	if (!dev)
		return -EINVAL;

	if (dev->flags & DM_FLAG_ACTIVATED)
		return 0;

	drv = dev->driver;
	assert(drv);

	ret = device_ofdata_to_platdata(dev);
	if (ret)
		goto fail;

	/* Ensure all parents are probed */
	if (dev->parent) {
		ret = device_probe(dev->parent);
		if (ret)
			goto fail;

		/*
		 * The device might have already been probed during
		 * the call to device_probe() on its parent device
		 * (e.g. PCI bridge devices). Test the flags again
		 * so that we don't mess up the device.
		 */
		if (dev->flags & DM_FLAG_ACTIVATED)
			return 0;
	}

	seq = uclass_resolve_seq(dev);
	if (seq < 0) {
		ret = seq;
		goto fail;
	}
	dev->seq = seq;

	dev->flags |= DM_FLAG_ACTIVATED;

	/*
	 * Process pinctrl for everything except the root device, and
	 * continue regardless of the result of pinctrl. Don't process pinctrl
	 * settings for pinctrl devices since the device may not yet be
	 * probed.
	 */
	if (dev->parent && device_get_uclass_id(dev) != UCLASS_PINCTRL)
		pinctrl_select_state(dev, "default");

	if (CONFIG_IS_ENABLED(POWER_DOMAIN) && dev->parent &&
	    (device_get_uclass_id(dev) != UCLASS_POWER_DOMAIN) &&
	    !(drv->flags & DM_FLAG_DEFAULT_PD_CTRL_OFF)) {
		ret = dev_power_domain_on(dev);
		if (ret)
			goto fail;
	}

	ret = uclass_pre_probe_device(dev);
	if (ret)
		goto fail;

	if (dev->parent && dev->parent->driver->child_pre_probe) {
		ret = dev->parent->driver->child_pre_probe(dev);
		if (ret)
			goto fail;
	}

	/* Only handle devices that have a valid ofnode */
	if (dev_of_valid(dev)) {
		/*
		 * Process 'assigned-{clocks/clock-parents/clock-rates}'
		 * properties
		 */
		ret = clk_set_defaults(dev, 0);
		if (ret)
			goto fail;
	}

	if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			goto fail;
	}

	ret = uclass_post_probe_device(dev);
	if (ret)
		goto fail_uclass;

	if (dev->parent && device_get_uclass_id(dev) == UCLASS_PINCTRL)
		pinctrl_select_state(dev, "default");

	return 0;
fail_uclass:
	if (device_remove(dev, DM_REMOVE_NORMAL)) {
		dm_warn("%s: Device '%s' failed to remove on error path\n",
			__func__, dev->name);
	}
fail:
	dev->flags &= ~DM_FLAG_ACTIVATED;

	dev->seq = -1;
	device_free(dev);

	return ret;
}

void *dev_get_platdata(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dev->platdata;
}

void *dev_get_parent_platdata(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dev->parent_platdata;
}

void *dev_get_uclass_platdata(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dev->uclass_platdata;
}

void *dev_get_priv(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dev->priv;
}

void *dev_get_uclass_priv(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dev->uclass_priv;
}

void *dev_get_parent_priv(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dev->parent_priv;
}
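
/*
 * device_get_device_tail() - Common tail for the device_get_*() helpers:
 * probe the device that was found and return it via @devp.
 */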
static int device_get_device_tail(struct udevice *dev, int ret,
				  struct udevice **devp)
{
	if (ret)
		return ret;

	ret = device_probe(dev);
	if (ret)
		return ret;

	*devp = dev;

	return 0;
}

#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
/**
 * device_find_by_ofnode() - Return device associated with given ofnode
 *
 * The returned device is *not* activated.
 *
 * @node: The ofnode for which an associated device should be looked up
 * @devp: Pointer to structure to hold the found device
 * Return: 0 if OK, -ve on error
 */
static int device_find_by_ofnode(ofnode node, struct udevice **devp)
{
	struct uclass *uc;
	struct udevice *dev;
	int ret;

	list_for_each_entry(uc, &gd->uclass_root, sibling_node) {
		ret = uclass_find_device_by_ofnode(uc->uc_drv->id, node,
						   &dev);
		if (!ret || dev) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}
#endif

int device_get_child(const struct udevice *parent, int index,
		     struct udevice **devp)
{
	struct udevice *dev;

	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		if (!index--)
			return device_get_device_tail(dev, 0, devp);
	}

	return -ENODEV;
}

int device_get_child_count(const struct udevice *parent)
{
	struct udevice *dev;
	int count = 0;

	list_for_each_entry(dev, &parent->child_head, sibling_node)
		count++;

	return count;
}
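
/*
 * device_find_child_by_seq() - Look up a child by its allocated sequence
 * number (seq) or, when @find_req_seq is true, by its requested sequence
 * number (req_seq). The device is not probed.
 */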
int device_find_child_by_seq(const struct udevice *parent, int seq_or_req_seq,
			     bool find_req_seq, struct udevice **devp)
{
	struct udevice *dev;

	*devp = NULL;
	if (seq_or_req_seq == -1)
		return -ENODEV;

	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		if ((find_req_seq ? dev->req_seq : dev->seq) ==
				seq_or_req_seq) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}

int device_get_child_by_seq(const struct udevice *parent, int seq,
			    struct udevice **devp)
{
	struct udevice *dev;
	int ret;

	*devp = NULL;
	ret = device_find_child_by_seq(parent, seq, false, &dev);
	if (ret == -ENODEV) {
		/*
		 * We didn't find it in probed devices. See if there is one
		 * that will request this seq if probed.
		 */
		ret = device_find_child_by_seq(parent, seq, true, &dev);
	}
	return device_get_device_tail(dev, ret, devp);
}

int device_find_child_by_of_offset(const struct udevice *parent, int of_offset,
				   struct udevice **devp)
{
	struct udevice *dev;

	*devp = NULL;

	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		if (dev_of_offset(dev) == of_offset) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}

int device_get_child_by_of_offset(const struct udevice *parent, int node,
				  struct udevice **devp)
{
	struct udevice *dev;
	int ret;

	*devp = NULL;
	ret = device_find_child_by_of_offset(parent, node, &dev);
	return device_get_device_tail(dev, ret, devp);
}
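
/*
 * _device_find_global_by_ofnode() - Recursively search the device tree
 * rooted at @parent for a device whose ofnode matches @ofnode.
 */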
static struct udevice *_device_find_global_by_ofnode(struct udevice *parent,
						     ofnode ofnode)
{
	struct udevice *dev, *found;

	if (ofnode_equal(dev_ofnode(parent), ofnode))
		return parent;

	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		found = _device_find_global_by_ofnode(dev, ofnode);
		if (found)
			return found;
	}

	return NULL;
}

int device_find_global_by_ofnode(ofnode ofnode, struct udevice **devp)
{
	*devp = _device_find_global_by_ofnode(gd->dm_root, ofnode);

	return *devp ? 0 : -ENOENT;
}

int device_get_global_by_ofnode(ofnode ofnode, struct udevice **devp)
{
	struct udevice *dev;

	dev = _device_find_global_by_ofnode(gd->dm_root, ofnode);
	return device_get_device_tail(dev, dev ? 0 : -ENOENT, devp);
}

#if CONFIG_IS_ENABLED(OF_PLATDATA)
int device_get_by_driver_info(const struct driver_info *info,
			      struct udevice **devp)
{
	struct driver_info *info_base =
		ll_entry_start(struct driver_info, driver_info);
	int idx = info - info_base;
	struct driver_rt *drt = gd_dm_driver_rt() + idx;
	struct udevice *dev;

	dev = drt->dev;
	*devp = NULL;

	return device_get_device_tail(dev, dev ? 0 : -ENOENT, devp);
}

int device_get_by_driver_info_idx(uint idx, struct udevice **devp)
{
	struct driver_rt *drt = gd_dm_driver_rt() + idx;
	struct udevice *dev;

	dev = drt->dev;
	*devp = NULL;

	return device_get_device_tail(dev, dev ? 0 : -ENOENT, devp);
}
#endif

int device_find_first_child(const struct udevice *parent, struct udevice **devp)
{
	if (list_empty(&parent->child_head)) {
		*devp = NULL;
	} else {
		*devp = list_first_entry(&parent->child_head, struct udevice,
					 sibling_node);
	}

	return 0;
}

int device_find_next_child(struct udevice **devp)
{
	struct udevice *dev = *devp;
	struct udevice *parent = dev->parent;

	if (list_is_last(&dev->sibling_node, &parent->child_head)) {
		*devp = NULL;
	} else {
		*devp = list_entry(dev->sibling_node.next, struct udevice,
				   sibling_node);
	}

	return 0;
}

int device_find_first_inactive_child(const struct udevice *parent,
				     enum uclass_id uclass_id,
				     struct udevice **devp)
{
	struct udevice *dev;

	*devp = NULL;
	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		if (!device_active(dev) &&
		    device_get_uclass_id(dev) == uclass_id) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}

int device_find_first_child_by_uclass(const struct udevice *parent,
				      enum uclass_id uclass_id,
				      struct udevice **devp)
{
	struct udevice *dev;

	*devp = NULL;
	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		if (device_get_uclass_id(dev) == uclass_id) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}

int device_find_child_by_name(const struct udevice *parent, const char *name,
			      struct udevice **devp)
{
	struct udevice *dev;

	*devp = NULL;

	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		if (!strcmp(dev->name, name)) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}

int device_first_child_err(struct udevice *parent, struct udevice **devp)
{
	struct udevice *dev;

	device_find_first_child(parent, &dev);
	if (!dev)
		return -ENODEV;

	return device_get_device_tail(dev, 0, devp);
}

int device_next_child_err(struct udevice **devp)
{
	struct udevice *dev = *devp;

	device_find_next_child(&dev);
	if (!dev)
		return -ENODEV;

	return device_get_device_tail(dev, 0, devp);
}

int device_first_child_ofdata_err(struct udevice *parent, struct udevice **devp)
{
	struct udevice *dev;
	int ret;

	device_find_first_child(parent, &dev);
	if (!dev)
		return -ENODEV;

	ret = device_ofdata_to_platdata(dev);
	if (ret)
		return ret;

	*devp = dev;

	return 0;
}

int device_next_child_ofdata_err(struct udevice **devp)
{
	struct udevice *dev = *devp;
	int ret;

	device_find_next_child(&dev);
	if (!dev)
		return -ENODEV;

	ret = device_ofdata_to_platdata(dev);
	if (ret)
		return ret;

	*devp = dev;

	return 0;
}

struct udevice *dev_get_parent(const struct udevice *child)
{
	return child->parent;
}

ulong dev_get_driver_data(const struct udevice *dev)
{
	return dev->driver_data;
}

const void *dev_get_driver_ops(const struct udevice *dev)
{
	if (!dev || !dev->driver->ops)
		return NULL;

	return dev->driver->ops;
}

enum uclass_id device_get_uclass_id(const struct udevice *dev)
{
	return dev->uclass->uc_drv->id;
}

const char *dev_get_uclass_name(const struct udevice *dev)
{
	if (!dev)
		return NULL;

	return dev->uclass->uc_drv->name;
}

bool device_has_children(const struct udevice *dev)
{
	return !list_empty(&dev->child_head);
}

bool device_has_active_children(const struct udevice *dev)
{
	struct udevice *child;

	for (device_find_first_child(dev, &child);
	     child;
	     device_find_next_child(&child)) {
		if (device_active(child))
			return true;
	}

	return false;
}

bool device_is_last_sibling(const struct udevice *dev)
{
	struct udevice *parent = dev->parent;

	if (!parent)
		return false;
	return list_is_last(&dev->sibling_node, &parent->child_head);
}
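
/*
 * The two helpers below manage ownership of a device's name string:
 * device_set_name() installs a strdup()'d copy and sets
 * DM_FLAG_NAME_ALLOCED so that the copy can be freed once it is no longer
 * needed.
 */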
void device_set_name_alloced(struct udevice *dev)
{
	dev->flags |= DM_FLAG_NAME_ALLOCED;
}

int device_set_name(struct udevice *dev, const char *name)
{
	name = strdup(name);
	if (!name)
		return -ENOMEM;
	dev->name = name;
	device_set_name_alloced(dev);

	return 0;
}

#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
bool device_is_compatible(const struct udevice *dev, const char *compat)
{
	return ofnode_device_is_compatible(dev_ofnode(dev), compat);
}

bool of_machine_is_compatible(const char *compat)
{
	const void *fdt = gd->fdt_blob;

	return !fdt_node_check_compatible(fdt, 0, compat);
}

int dev_disable_by_path(const char *path)
{
	struct uclass *uc;
	ofnode node = ofnode_path(path);
	struct udevice *dev;
	int ret = 1;

	if (!of_live_active())
		return -ENOSYS;

	list_for_each_entry(uc, &gd->uclass_root, sibling_node) {
		ret = uclass_find_device_by_ofnode(uc->uc_drv->id, node, &dev);
		if (!ret)
			break;
	}

	if (ret)
		return ret;

	ret = device_remove(dev, DM_REMOVE_NORMAL);
	if (ret)
		return ret;

	ret = device_unbind(dev);
	if (ret)
		return ret;

	return ofnode_set_enabled(node, false);
}

int dev_enable_by_path(const char *path)
{
	ofnode node = ofnode_path(path);
	ofnode pnode = ofnode_get_parent(node);
	struct udevice *parent;
	int ret = 1;

	if (!of_live_active())
		return -ENOSYS;

	ret = device_find_by_ofnode(pnode, &parent);
	if (ret)
		return ret;

	ret = ofnode_set_enabled(node, true);
	if (ret)
		return ret;

	return lists_bind_fdt(parent, node, NULL, false);
}
#endif