device.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Device manager
 *
 * Copyright (c) 2013 Google, Inc
 *
 * (C) Copyright 2012
 * Pavel Herrmann <morpheus.ibis@gmail.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <clk.h>
#include <fdtdec.h>
#include <fdt_support.h>
#include <malloc.h>
#include <asm/cache.h>
#include <dm/device.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/of_access.h>
#include <dm/pinctrl.h>
#include <dm/platdata.h>
#include <dm/read.h>
#include <dm/uclass.h>
#include <dm/uclass-internal.h>
#include <dm/util.h>
#include <linux/err.h>
#include <linux/list.h>
#include <power-domain.h>

DECLARE_GLOBAL_DATA_PTR;

static int device_bind_common(struct udevice *parent, const struct driver *drv,
			      const char *name, void *plat,
			      ulong driver_data, ofnode node,
			      uint of_plat_size, struct udevice **devp)
{
	struct udevice *dev;
	struct uclass *uc;
	int size, ret = 0;
	bool auto_seq = true;
	void *ptr;

	if (CONFIG_IS_ENABLED(OF_PLATDATA_NO_BIND))
		return -ENOSYS;

	if (devp)
		*devp = NULL;
	if (!name)
		return -EINVAL;

	ret = uclass_get(drv->id, &uc);
	if (ret) {
		debug("Missing uclass for driver %s\n", drv->name);
		return ret;
	}

	dev = calloc(1, sizeof(struct udevice));
	if (!dev)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->sibling_node);
	INIT_LIST_HEAD(&dev->child_head);
	INIT_LIST_HEAD(&dev->uclass_node);
#ifdef CONFIG_DEVRES
	INIT_LIST_HEAD(&dev->devres_head);
#endif
	dev_set_plat(dev, plat);
	dev->driver_data = driver_data;
	dev->name = name;
	dev_set_ofnode(dev, node);
	dev->parent = parent;
	dev->driver = drv;
	dev->uclass = uc;

	dev->seq_ = -1;
	if (CONFIG_IS_ENABLED(DM_SEQ_ALIAS) &&
	    (uc->uc_drv->flags & DM_UC_FLAG_SEQ_ALIAS)) {
		/*
		 * Some devices, such as a SPI bus, I2C bus and serial ports
		 * are numbered using aliases.
		 */
		if (CONFIG_IS_ENABLED(OF_CONTROL) &&
		    !CONFIG_IS_ENABLED(OF_PLATDATA)) {
			if (uc->uc_drv->name && ofnode_valid(node)) {
				if (!dev_read_alias_seq(dev, &dev->seq_)) {
					auto_seq = false;
					log_debug(" - seq=%d\n", dev->seq_);
				}
			}
		}
	}
	if (auto_seq && !(uc->uc_drv->flags & DM_UC_FLAG_NO_AUTO_SEQ))
		dev->seq_ = uclass_find_next_free_seq(uc);

	/* Check if we need to allocate plat */
	if (drv->plat_auto) {
		bool alloc = !plat;

		/*
		 * For of-platdata, we try to use the existing data, but if
		 * plat_auto is larger, we must allocate a new space
		 */
		if (CONFIG_IS_ENABLED(OF_PLATDATA)) {
			if (of_plat_size)
				dev_or_flags(dev, DM_FLAG_OF_PLATDATA);
			if (of_plat_size < drv->plat_auto)
				alloc = true;
		}
		if (alloc) {
			dev_or_flags(dev, DM_FLAG_ALLOC_PDATA);
			ptr = calloc(1, drv->plat_auto);
			if (!ptr) {
				ret = -ENOMEM;
				goto fail_alloc1;
			}

			/*
			 * For of-platdata, copy the old plat into the new
			 * space
			 */
			if (CONFIG_IS_ENABLED(OF_PLATDATA) && plat)
				memcpy(ptr, plat, of_plat_size);
			dev_set_plat(dev, ptr);
		}
	}

	size = uc->uc_drv->per_device_plat_auto;
	if (size) {
		dev_or_flags(dev, DM_FLAG_ALLOC_UCLASS_PDATA);
		ptr = calloc(1, size);
		if (!ptr) {
			ret = -ENOMEM;
			goto fail_alloc2;
		}
		dev_set_uclass_plat(dev, ptr);
	}

	if (parent) {
		size = parent->driver->per_child_plat_auto;
		if (!size)
			size = parent->uclass->uc_drv->per_child_plat_auto;
		if (size) {
			dev_or_flags(dev, DM_FLAG_ALLOC_PARENT_PDATA);
			ptr = calloc(1, size);
			if (!ptr) {
				ret = -ENOMEM;
				goto fail_alloc3;
			}
			dev_set_parent_plat(dev, ptr);
		}
		/* put dev into parent's successor list */
		list_add_tail(&dev->sibling_node, &parent->child_head);
	}

	ret = uclass_bind_device(dev);
	if (ret)
		goto fail_uclass_bind;

	/* if we fail to bind we remove device from successors and free it */
	if (drv->bind) {
		ret = drv->bind(dev);
		if (ret)
			goto fail_bind;
	}
	if (parent && parent->driver->child_post_bind) {
		ret = parent->driver->child_post_bind(dev);
		if (ret)
			goto fail_child_post_bind;
	}
	if (uc->uc_drv->post_bind) {
		ret = uc->uc_drv->post_bind(dev);
		if (ret)
			goto fail_uclass_post_bind;
	}

	if (parent)
		pr_debug("Bound device %s to %s\n", dev->name, parent->name);
	if (devp)
		*devp = dev;

	dev_or_flags(dev, DM_FLAG_BOUND);

	return 0;

fail_uclass_post_bind:
	/* There is no child unbind() method, so no clean-up required */
fail_child_post_bind:
	if (CONFIG_IS_ENABLED(DM_DEVICE_REMOVE)) {
		if (drv->unbind && drv->unbind(dev)) {
			dm_warn("unbind() method failed on dev '%s' on error path\n",
				dev->name);
		}
	}

fail_bind:
	if (CONFIG_IS_ENABLED(DM_DEVICE_REMOVE)) {
		if (uclass_unbind_device(dev)) {
			dm_warn("Failed to unbind dev '%s' on error path\n",
				dev->name);
		}
	}
fail_uclass_bind:
	if (CONFIG_IS_ENABLED(DM_DEVICE_REMOVE)) {
		list_del(&dev->sibling_node);
		if (dev_get_flags(dev) & DM_FLAG_ALLOC_PARENT_PDATA) {
			free(dev_get_parent_plat(dev));
			dev_set_parent_plat(dev, NULL);
		}
	}
fail_alloc3:
	if (CONFIG_IS_ENABLED(DM_DEVICE_REMOVE)) {
		if (dev_get_flags(dev) & DM_FLAG_ALLOC_UCLASS_PDATA) {
			free(dev_get_uclass_plat(dev));
			dev_set_uclass_plat(dev, NULL);
		}
	}
fail_alloc2:
	if (CONFIG_IS_ENABLED(DM_DEVICE_REMOVE)) {
		if (dev_get_flags(dev) & DM_FLAG_ALLOC_PDATA) {
			free(dev_get_plat(dev));
			dev_set_plat(dev, NULL);
		}
	}
fail_alloc1:
	devres_release_all(dev);

	free(dev);

	return ret;
}

int device_bind_with_driver_data(struct udevice *parent,
				 const struct driver *drv, const char *name,
				 ulong driver_data, ofnode node,
				 struct udevice **devp)
{
	return device_bind_common(parent, drv, name, NULL, driver_data, node,
				  0, devp);
}

int device_bind(struct udevice *parent, const struct driver *drv,
		const char *name, void *plat, ofnode node,
		struct udevice **devp)
{
	return device_bind_common(parent, drv, name, plat, 0, node, 0,
				  devp);
}
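
/*
 * Usage note (added for illustration, not part of the original file): a
 * minimal sketch of binding a device by hand against a named driver.
 * "demo_parent" and "my-driver" are hypothetical; most code binds devices
 * through U_BOOT_DRVINFO() declarations or lists_bind_fdt() rather than
 * calling device_bind() directly.
 *
 *	const struct driver *drv = lists_driver_lookup_name("my-driver");
 *	struct udevice *dev;
 *	int ret;
 *
 *	if (!drv)
 *		return -ENOENT;
 *	ret = device_bind(demo_parent, drv, "my-device", NULL, ofnode_null(),
 *			  &dev);
 *	if (ret)
 *		return ret;
 *
 * At this point dev is bound (DM_FLAG_BOUND is set) but not yet probed.
 */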

int device_bind_by_name(struct udevice *parent, bool pre_reloc_only,
			const struct driver_info *info, struct udevice **devp)
{
	struct driver *drv;
	uint plat_size = 0;
	int ret;

	drv = lists_driver_lookup_name(info->name);
	if (!drv)
		return -ENOENT;
	if (pre_reloc_only && !(drv->flags & DM_FLAG_PRE_RELOC))
		return -EPERM;

#if CONFIG_IS_ENABLED(OF_PLATDATA)
	plat_size = info->plat_size;
#endif
	ret = device_bind_common(parent, drv, info->name, (void *)info->plat, 0,
				 ofnode_null(), plat_size, devp);
	if (ret)
		return ret;

	return ret;
}

int device_reparent(struct udevice *dev, struct udevice *new_parent)
{
	struct udevice *pos, *n;

	assert(dev);
	assert(new_parent);

	list_for_each_entry_safe(pos, n, &dev->parent->child_head,
				 sibling_node) {
		if (pos->driver != dev->driver)
			continue;

		list_del(&dev->sibling_node);
		list_add_tail(&dev->sibling_node, &new_parent->child_head);
		dev->parent = new_parent;

		break;
	}

	return 0;
}

static void *alloc_priv(int size, uint flags)
{
	void *priv;

	if (flags & DM_FLAG_ALLOC_PRIV_DMA) {
		size = ROUND(size, ARCH_DMA_MINALIGN);
		priv = memalign(ARCH_DMA_MINALIGN, size);
		if (priv) {
			memset(priv, '\0', size);

			/*
			 * Ensure that the zero bytes are flushed to memory.
			 * This prevents problems if the driver uses this as
			 * both an input and an output buffer:
			 *
			 * 1. Zeroes written to buffer (here) and sit in the
			 *    cache
			 * 2. Driver issues a read command to DMA
			 * 3. CPU runs out of cache space and evicts some cache
			 *    data in the buffer, writing zeroes to RAM from
			 *    the memset() above
			 * 4. DMA completes
			 * 5. Buffer now has some DMA data and some zeroes
			 * 6. Data being read is now incorrect
			 *
			 * To prevent this, ensure that the cache is clean
			 * within this range at the start. The driver can then
			 * use normal flush-after-write, invalidate-before-read
			 * procedures.
			 *
			 * TODO(sjg@chromium.org): Drop this microblaze
			 * exception.
			 */
#ifndef CONFIG_MICROBLAZE
			flush_dcache_range((ulong)priv, (ulong)priv + size);
#endif
		}
	} else {
		priv = calloc(1, size);
	}

	return priv;
}

/**
 * device_alloc_priv() - Allocate priv/plat data required by the device
 *
 * @dev: Device to process
 * @return 0 if OK, -ENOMEM if out of memory
 */
static int device_alloc_priv(struct udevice *dev)
{
	const struct driver *drv;
	void *ptr;
	int size;

	drv = dev->driver;
	assert(drv);

	/* Allocate private data if requested and not reentered */
	if (drv->priv_auto && !dev_get_priv(dev)) {
		ptr = alloc_priv(drv->priv_auto, drv->flags);
		if (!ptr)
			return -ENOMEM;
		dev_set_priv(dev, ptr);
	}

	/* Allocate uclass per-device data if requested and not reentered */
	size = dev->uclass->uc_drv->per_device_auto;
	if (size && !dev_get_uclass_priv(dev)) {
		ptr = alloc_priv(size, dev->uclass->uc_drv->flags);
		if (!ptr)
			return -ENOMEM;
		dev_set_uclass_priv(dev, ptr);
	}

	/* Allocate parent data for this child */
	if (dev->parent) {
		size = dev->parent->driver->per_child_auto;
		if (!size)
			size = dev->parent->uclass->uc_drv->per_child_auto;
		if (size && !dev_get_parent_priv(dev)) {
			ptr = alloc_priv(size, drv->flags);
			if (!ptr)
				return -ENOMEM;
			dev_set_parent_priv(dev, ptr);
		}
	}

	return 0;
}

int device_of_to_plat(struct udevice *dev)
{
	const struct driver *drv;
	int ret;

	if (!dev)
		return -EINVAL;

	if (dev_get_flags(dev) & DM_FLAG_PLATDATA_VALID)
		return 0;

	/*
	 * This is not needed if binding is disabled, since data is allocated
	 * at build time.
	 */
	if (!CONFIG_IS_ENABLED(OF_PLATDATA_NO_BIND)) {
		/* Ensure all parents have ofdata */
		if (dev->parent) {
			ret = device_of_to_plat(dev->parent);
			if (ret)
				goto fail;

			/*
			 * The device might have already been probed during
			 * the call to device_probe() on its parent device
			 * (e.g. PCI bridge devices). Test the flags again
			 * so that we don't mess up the device.
			 */
			if (dev_get_flags(dev) & DM_FLAG_PLATDATA_VALID)
				return 0;
		}

		ret = device_alloc_priv(dev);
		if (ret)
			goto fail;
	}
	drv = dev->driver;
	assert(drv);

	if (drv->of_to_plat &&
	    (CONFIG_IS_ENABLED(OF_PLATDATA) || dev_has_ofnode(dev))) {
		ret = drv->of_to_plat(dev);
		if (ret)
			goto fail;
	}

	dev_or_flags(dev, DM_FLAG_PLATDATA_VALID);

	return 0;
fail:
	device_free(dev);

	return ret;
}

/**
 * device_get_dma_constraints() - Populate device's DMA constraints
 *
 * Gets a device's DMA constraints from firmware. This information is later
 * used by drivers to translate physical addresses to the device's bus address
 * space. For now only device-tree is supported.
 *
 * @dev: Pointer to target device
 * Return: 0 if OK or if no DMA constraints were found, error otherwise
 */
static int device_get_dma_constraints(struct udevice *dev)
{
	struct udevice *parent = dev->parent;
	phys_addr_t cpu = 0;
	dma_addr_t bus = 0;
	u64 size = 0;
	int ret;

	if (!CONFIG_IS_ENABLED(DM_DMA) || !parent || !dev_has_ofnode(parent))
		return 0;

	/*
	 * We start parsing for dma-ranges from the device's bus node. This is
	 * especially important on nested buses.
	 */
	ret = dev_get_dma_range(parent, &cpu, &bus, &size);
	/* Don't return an error if no 'dma-ranges' were found */
	if (ret && ret != -ENOENT) {
		dm_warn("%s: failed to get DMA range, %d\n", dev->name, ret);
		return ret;
	}

	dev_set_dma_offset(dev, cpu - bus);

	return 0;
}

int device_probe(struct udevice *dev)
{
	const struct driver *drv;
	int ret;

	if (!dev)
		return -EINVAL;

	if (dev_get_flags(dev) & DM_FLAG_ACTIVATED)
		return 0;

	drv = dev->driver;
	assert(drv);

	ret = device_of_to_plat(dev);
	if (ret)
		goto fail;

	/* Ensure all parents are probed */
	if (dev->parent) {
		ret = device_probe(dev->parent);
		if (ret)
			goto fail;

		/*
		 * The device might have already been probed during
		 * the call to device_probe() on its parent device
		 * (e.g. PCI bridge devices). Test the flags again
		 * so that we don't mess up the device.
		 */
		if (dev_get_flags(dev) & DM_FLAG_ACTIVATED)
			return 0;
	}

	dev_or_flags(dev, DM_FLAG_ACTIVATED);

	/*
	 * Process pinctrl for everything except the root device, and
	 * continue regardless of the result of pinctrl. Don't process pinctrl
	 * settings for pinctrl devices since the device may not yet be
	 * probed.
	 *
	 * This call can produce some non-intuitive results. For example, on an
	 * x86 device where dev is the main PCI bus, the pinctrl device may be
	 * child or grandchild of that bus, meaning that the child will be
	 * probed here. If the child happens to be the P2SB and the pinctrl
	 * device is a child of that, then both the pinctrl and P2SB will be
	 * probed by this call. This works because the DM_FLAG_ACTIVATED flag
	 * is set just above. However, the PCI bus' probe() method and
	 * associated uclass methods have not yet been called.
	 */
	if (dev->parent && device_get_uclass_id(dev) != UCLASS_PINCTRL)
		pinctrl_select_state(dev, "default");

	if (CONFIG_IS_ENABLED(POWER_DOMAIN) && dev->parent &&
	    (device_get_uclass_id(dev) != UCLASS_POWER_DOMAIN) &&
	    !(drv->flags & DM_FLAG_DEFAULT_PD_CTRL_OFF)) {
		ret = dev_power_domain_on(dev);
		if (ret)
			goto fail;
	}

	ret = device_get_dma_constraints(dev);
	if (ret)
		goto fail;

	ret = uclass_pre_probe_device(dev);
	if (ret)
		goto fail;

	if (dev->parent && dev->parent->driver->child_pre_probe) {
		ret = dev->parent->driver->child_pre_probe(dev);
		if (ret)
			goto fail;
	}

	/* Only handle devices that have a valid ofnode */
	if (dev_has_ofnode(dev)) {
		/*
		 * Process 'assigned-{clocks/clock-parents/clock-rates}'
		 * properties
		 */
		ret = clk_set_defaults(dev, CLK_DEFAULTS_PRE);
		if (ret)
			goto fail;
	}

	if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			goto fail;
	}

	ret = uclass_post_probe_device(dev);
	if (ret)
		goto fail_uclass;

	if (dev->parent && device_get_uclass_id(dev) == UCLASS_PINCTRL)
		pinctrl_select_state(dev, "default");

	return 0;
fail_uclass:
	if (device_remove(dev, DM_REMOVE_NORMAL)) {
		dm_warn("%s: Device '%s' failed to remove on error path\n",
			__func__, dev->name);
	}
fail:
	dev_bic_flags(dev, DM_FLAG_ACTIVATED);

	device_free(dev);

	return ret;
}
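
/*
 * Usage note (added for illustration, not part of the original file): a
 * minimal sketch of activating a bound device. "demo_bus" is a hypothetical
 * parent device; in practice most callers go through uclass helpers such as
 * uclass_get_device(), or through device_get_child_by_seq() below, both of
 * which call device_probe() internally.
 *
 *	struct udevice *dev;
 *	int ret;
 *
 *	ret = device_find_child_by_seq(demo_bus, 0, &dev);
 *	if (ret)
 *		return ret;
 *	ret = device_probe(dev);
 *	if (ret)
 *		return ret;
 *
 * After this, the device is active and dev_get_priv(dev) returns the
 * allocated private data.
 */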

void *dev_get_plat(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dm_priv_to_rw(dev->plat_);
}

void *dev_get_parent_plat(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dm_priv_to_rw(dev->parent_plat_);
}

void *dev_get_uclass_plat(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dm_priv_to_rw(dev->uclass_plat_);
}

void *dev_get_priv(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dm_priv_to_rw(dev->priv_);
}

void *dev_get_uclass_priv(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dm_priv_to_rw(dev->uclass_priv_);
}

void *dev_get_parent_priv(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dm_priv_to_rw(dev->parent_priv_);
}

static int device_get_device_tail(struct udevice *dev, int ret,
				  struct udevice **devp)
{
	if (ret)
		return ret;

	ret = device_probe(dev);
	if (ret)
		return ret;

	*devp = dev;

	return 0;
}

#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
/**
 * device_find_by_ofnode() - Return device associated with given ofnode
 *
 * The returned device is *not* activated.
 *
 * @node: The ofnode for which an associated device should be looked up
 * @devp: Pointer to structure to hold the found device
 * Return: 0 if OK, -ve on error
 */
static int device_find_by_ofnode(ofnode node, struct udevice **devp)
{
	struct uclass *uc;
	struct udevice *dev;
	int ret;

	list_for_each_entry(uc, gd->uclass_root, sibling_node) {
		ret = uclass_find_device_by_ofnode(uc->uc_drv->id, node,
						   &dev);
		if (!ret || dev) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}
#endif

int device_get_child(const struct udevice *parent, int index,
		     struct udevice **devp)
{
	struct udevice *dev;

	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		if (!index--)
			return device_get_device_tail(dev, 0, devp);
	}

	return -ENODEV;
}

int device_get_child_count(const struct udevice *parent)
{
	struct udevice *dev;
	int count = 0;

	list_for_each_entry(dev, &parent->child_head, sibling_node)
		count++;

	return count;
}

int device_find_child_by_seq(const struct udevice *parent, int seq,
			     struct udevice **devp)
{
	struct udevice *dev;

	*devp = NULL;

	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		if (dev->seq_ == seq) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}

int device_get_child_by_seq(const struct udevice *parent, int seq,
			    struct udevice **devp)
{
	struct udevice *dev;
	int ret;

	*devp = NULL;
	ret = device_find_child_by_seq(parent, seq, &dev);

	return device_get_device_tail(dev, ret, devp);
}

int device_find_child_by_of_offset(const struct udevice *parent, int of_offset,
				   struct udevice **devp)
{
	struct udevice *dev;

	*devp = NULL;

	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		if (dev_of_offset(dev) == of_offset) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}

int device_get_child_by_of_offset(const struct udevice *parent, int node,
				  struct udevice **devp)
{
	struct udevice *dev;
	int ret;

	*devp = NULL;
	ret = device_find_child_by_of_offset(parent, node, &dev);

	return device_get_device_tail(dev, ret, devp);
}

static struct udevice *_device_find_global_by_ofnode(struct udevice *parent,
						     ofnode ofnode)
{
	struct udevice *dev, *found;

	if (ofnode_equal(dev_ofnode(parent), ofnode))
		return parent;

	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		found = _device_find_global_by_ofnode(dev, ofnode);
		if (found)
			return found;
	}

	return NULL;
}

int device_find_global_by_ofnode(ofnode ofnode, struct udevice **devp)
{
	*devp = _device_find_global_by_ofnode(gd->dm_root, ofnode);

	return *devp ? 0 : -ENOENT;
}

int device_get_global_by_ofnode(ofnode ofnode, struct udevice **devp)
{
	struct udevice *dev;

	dev = _device_find_global_by_ofnode(gd->dm_root, ofnode);

	return device_get_device_tail(dev, dev ? 0 : -ENOENT, devp);
}

#if CONFIG_IS_ENABLED(OF_PLATDATA)
int device_get_by_ofplat_idx(uint idx, struct udevice **devp)
{
	struct udevice *dev;

	if (CONFIG_IS_ENABLED(OF_PLATDATA_INST)) {
		struct udevice *base = ll_entry_start(struct udevice, udevice);

		dev = base + idx;
	} else {
		struct driver_rt *drt = gd_dm_driver_rt() + idx;

		dev = drt->dev;
	}
	*devp = NULL;

	return device_get_device_tail(dev, dev ? 0 : -ENOENT, devp);
}
#endif

int device_find_first_child(const struct udevice *parent, struct udevice **devp)
{
	if (list_empty(&parent->child_head)) {
		*devp = NULL;
	} else {
		*devp = list_first_entry(&parent->child_head, struct udevice,
					 sibling_node);
	}

	return 0;
}

int device_find_next_child(struct udevice **devp)
{
	struct udevice *dev = *devp;
	struct udevice *parent = dev->parent;

	if (list_is_last(&dev->sibling_node, &parent->child_head)) {
		*devp = NULL;
	} else {
		*devp = list_entry(dev->sibling_node.next, struct udevice,
				   sibling_node);
	}

	return 0;
}
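
/*
 * Usage note (added for illustration, not part of the original file): a
 * minimal sketch of walking a device's children with the two helpers above;
 * this is the same pattern used by device_has_active_children() below.
 * "demo_parent" is a hypothetical device.
 *
 *	struct udevice *child;
 *
 *	for (device_find_first_child(demo_parent, &child);
 *	     child;
 *	     device_find_next_child(&child))
 *		printf("child: %s\n", child->name);
 */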

int device_find_first_inactive_child(const struct udevice *parent,
				     enum uclass_id uclass_id,
				     struct udevice **devp)
{
	struct udevice *dev;

	*devp = NULL;
	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		if (!device_active(dev) &&
		    device_get_uclass_id(dev) == uclass_id) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}

int device_find_first_child_by_uclass(const struct udevice *parent,
				      enum uclass_id uclass_id,
				      struct udevice **devp)
{
	struct udevice *dev;

	*devp = NULL;
	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		if (device_get_uclass_id(dev) == uclass_id) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}

int device_find_child_by_name(const struct udevice *parent, const char *name,
			      struct udevice **devp)
{
	struct udevice *dev;

	*devp = NULL;

	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		if (!strcmp(dev->name, name)) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}

int device_first_child_err(struct udevice *parent, struct udevice **devp)
{
	struct udevice *dev;

	device_find_first_child(parent, &dev);
	if (!dev)
		return -ENODEV;

	return device_get_device_tail(dev, 0, devp);
}

int device_next_child_err(struct udevice **devp)
{
	struct udevice *dev = *devp;

	device_find_next_child(&dev);
	if (!dev)
		return -ENODEV;

	return device_get_device_tail(dev, 0, devp);
}

int device_first_child_ofdata_err(struct udevice *parent, struct udevice **devp)
{
	struct udevice *dev;
	int ret;

	device_find_first_child(parent, &dev);
	if (!dev)
		return -ENODEV;

	ret = device_of_to_plat(dev);
	if (ret)
		return ret;

	*devp = dev;

	return 0;
}

int device_next_child_ofdata_err(struct udevice **devp)
{
	struct udevice *dev = *devp;
	int ret;

	device_find_next_child(&dev);
	if (!dev)
		return -ENODEV;

	ret = device_of_to_plat(dev);
	if (ret)
		return ret;

	*devp = dev;

	return 0;
}

struct udevice *dev_get_parent(const struct udevice *child)
{
	return child->parent;
}

ulong dev_get_driver_data(const struct udevice *dev)
{
	return dev->driver_data;
}

const void *dev_get_driver_ops(const struct udevice *dev)
{
	if (!dev || !dev->driver->ops)
		return NULL;

	return dev->driver->ops;
}

enum uclass_id device_get_uclass_id(const struct udevice *dev)
{
	return dev->uclass->uc_drv->id;
}

const char *dev_get_uclass_name(const struct udevice *dev)
{
	if (!dev)
		return NULL;

	return dev->uclass->uc_drv->name;
}

bool device_has_children(const struct udevice *dev)
{
	return !list_empty(&dev->child_head);
}

bool device_has_active_children(const struct udevice *dev)
{
	struct udevice *child;

	for (device_find_first_child(dev, &child);
	     child;
	     device_find_next_child(&child)) {
		if (device_active(child))
			return true;
	}

	return false;
}

bool device_is_last_sibling(const struct udevice *dev)
{
	struct udevice *parent = dev->parent;

	if (!parent)
		return false;
	return list_is_last(&dev->sibling_node, &parent->child_head);
}

void device_set_name_alloced(struct udevice *dev)
{
	dev_or_flags(dev, DM_FLAG_NAME_ALLOCED);
}

int device_set_name(struct udevice *dev, const char *name)
{
	name = strdup(name);
	if (!name)
		return -ENOMEM;
	dev->name = name;
	device_set_name_alloced(dev);

	return 0;
}

void dev_set_priv(struct udevice *dev, void *priv)
{
	dev->priv_ = priv;
}

void dev_set_parent_priv(struct udevice *dev, void *parent_priv)
{
	dev->parent_priv_ = parent_priv;
}

void dev_set_uclass_priv(struct udevice *dev, void *uclass_priv)
{
	dev->uclass_priv_ = uclass_priv;
}

void dev_set_plat(struct udevice *dev, void *plat)
{
	dev->plat_ = plat;
}

void dev_set_parent_plat(struct udevice *dev, void *parent_plat)
{
	dev->parent_plat_ = parent_plat;
}

void dev_set_uclass_plat(struct udevice *dev, void *uclass_plat)
{
	dev->uclass_plat_ = uclass_plat;
}

#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
bool device_is_compatible(const struct udevice *dev, const char *compat)
{
	return ofnode_device_is_compatible(dev_ofnode(dev), compat);
}

bool of_machine_is_compatible(const char *compat)
{
	const void *fdt = gd->fdt_blob;

	return !fdt_node_check_compatible(fdt, 0, compat);
}

int dev_disable_by_path(const char *path)
{
	struct uclass *uc;
	ofnode node = ofnode_path(path);
	struct udevice *dev;
	int ret = 1;

	if (!of_live_active())
		return -ENOSYS;

	list_for_each_entry(uc, gd->uclass_root, sibling_node) {
		ret = uclass_find_device_by_ofnode(uc->uc_drv->id, node, &dev);
		if (!ret)
			break;
	}

	if (ret)
		return ret;

	ret = device_remove(dev, DM_REMOVE_NORMAL);
	if (ret)
		return ret;

	ret = device_unbind(dev);
	if (ret)
		return ret;

	return ofnode_set_enabled(node, false);
}

int dev_enable_by_path(const char *path)
{
	ofnode node = ofnode_path(path);
	ofnode pnode = ofnode_get_parent(node);
	struct udevice *parent;
	int ret = 1;

	if (!of_live_active())
		return -ENOSYS;

	ret = device_find_by_ofnode(pnode, &parent);
	if (ret)
		return ret;

	ret = ofnode_set_enabled(node, true);
	if (ret)
		return ret;

	return lists_bind_fdt(parent, node, NULL, false);
}
#endif

#if CONFIG_IS_ENABLED(OF_PLATDATA_RT)
static struct udevice_rt *dev_get_rt(const struct udevice *dev)
{
	struct udevice *base = ll_entry_start(struct udevice, udevice);
	int idx = dev - base;

	struct udevice_rt *urt = gd_dm_udevice_rt() + idx;

	return urt;
}

u32 dev_get_flags(const struct udevice *dev)
{
	const struct udevice_rt *urt = dev_get_rt(dev);

	return urt->flags_;
}

void dev_or_flags(const struct udevice *dev, u32 or)
{
	struct udevice_rt *urt = dev_get_rt(dev);

	urt->flags_ |= or;
}

void dev_bic_flags(const struct udevice *dev, u32 bic)
{
	struct udevice_rt *urt = dev_get_rt(dev);

	urt->flags_ &= ~bic;
}
#endif /* OF_PLATDATA_RT */