clk-uclass.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Google, Inc
 * Written by Simon Glass <sjg@chromium.org>
 * Copyright (c) 2016, NVIDIA CORPORATION.
 * Copyright (c) 2018, Theobroma Systems Design und Consulting GmbH
 */

#include <common.h>
#include <clk.h>
#include <clk-uclass.h>
#include <dm.h>
#include <dt-structs.h>
#include <errno.h>
#include <malloc.h>
#include <dm/devres.h>
#include <dm/read.h>
#include <linux/clk-provider.h>
#include <linux/err.h>

static inline const struct clk_ops *clk_dev_ops(struct udevice *dev)
{
	return (const struct clk_ops *)dev->driver->ops;
}

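/*
 * The helpers below are only built with OF_CONTROL: they resolve clocks
 * from a device's "clocks"/"clock-names" device-tree properties and apply
 * the assigned-clocks defaults.
 */
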
#if CONFIG_IS_ENABLED(OF_CONTROL)
# if CONFIG_IS_ENABLED(OF_PLATDATA)
int clk_get_by_index_platdata(struct udevice *dev, int index,
			      struct phandle_1_arg *cells, struct clk *clk)
{
	int ret;

	if (index != 0)
		return -ENOSYS;
	ret = uclass_get_device(UCLASS_CLK, 0, &clk->dev);
	if (ret)
		return ret;
	clk->id = cells[0].arg[0];

	return 0;
}
# else
static int clk_of_xlate_default(struct clk *clk,
				struct ofnode_phandle_args *args)
{
	debug("%s(clk=%p)\n", __func__, clk);

	if (args->args_count > 1) {
		debug("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	if (args->args_count)
		clk->id = args->args[0];
	else
		clk->id = 0;

	clk->data = 0;

	return 0;
}

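/*
 * clk_get_by_index_tail() finishes a lookup started by one of the
 * clk_get_by_* helpers: it binds the provider device referenced by the
 * parsed phandle, translates the clock specifier through the provider's
 * of_xlate op (falling back to the default single-cell translation) and
 * finally requests the clock from the provider.
 */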
static int clk_get_by_index_tail(int ret, ofnode node,
				 struct ofnode_phandle_args *args,
				 const char *list_name, int index,
				 struct clk *clk)
{
	struct udevice *dev_clk;
	const struct clk_ops *ops;

	assert(clk);
	clk->dev = NULL;
	if (ret)
		goto err;

	ret = uclass_get_device_by_ofnode(UCLASS_CLK, args->node, &dev_clk);
	if (ret) {
		debug("%s: uclass_get_device_by_ofnode failed: err=%d\n",
		      __func__, ret);
		return ret;
	}

	clk->dev = dev_clk;

	ops = clk_dev_ops(dev_clk);

	if (ops->of_xlate)
		ret = ops->of_xlate(clk, args);
	else
		ret = clk_of_xlate_default(clk, args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		return ret;
	}

	return clk_request(dev_clk, clk);
err:
	debug("%s: Node '%s', property '%s', failed to request CLK index %d: %d\n",
	      __func__, ofnode_get_name(node), list_name, index, ret);
	return ret;
}

static int clk_get_by_indexed_prop(struct udevice *dev, const char *prop_name,
				   int index, struct clk *clk)
{
	int ret;
	struct ofnode_phandle_args args;

	debug("%s(dev=%p, index=%d, clk=%p)\n", __func__, dev, index, clk);

	assert(clk);
	clk->dev = NULL;

	ret = dev_read_phandle_with_args(dev, prop_name, "#clock-cells", 0,
					 index, &args);
	if (ret) {
		debug("%s: dev_read_phandle_with_args failed: err=%d\n",
		      __func__, ret);
		return ret;
	}

	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
				     index, clk);
}

int clk_get_by_index(struct udevice *dev, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = dev_read_phandle_with_args(dev, "clocks", "#clock-cells", 0,
					 index, &args);

	return clk_get_by_index_tail(ret, dev_ofnode(dev), &args, "clocks",
				     index, clk);
}

int clk_get_by_index_nodev(ofnode node, int index, struct clk *clk)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = ofnode_parse_phandle_with_args(node, "clocks", "#clock-cells", 0,
					     index, &args);

	return clk_get_by_index_tail(ret, node, &args, "clocks",
				     index, clk);
}

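/*
 * Illustrative use from a consumer driver's probe() (sketch only; the
 * rate and the choice of index are hypothetical, not taken from this file):
 *
 *	struct clk clk;
 *	int ret;
 *
 *	ret = clk_get_by_index(dev, 0, &clk);	// first entry in "clocks"
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(&clk);
 *	if (ret)
 *		return ret;
 *	clk_set_rate(&clk, 100000000);		// 100 MHz, if supported
 */
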
int clk_get_bulk(struct udevice *dev, struct clk_bulk *bulk)
{
	int i, ret, err, count;

	bulk->count = 0;

	count = dev_count_phandle_with_args(dev, "clocks", "#clock-cells");
	if (count < 1)
		return count;

	bulk->clks = devm_kcalloc(dev, count, sizeof(struct clk), GFP_KERNEL);
	if (!bulk->clks)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = clk_get_by_index(dev, i, &bulk->clks[i]);
		if (ret < 0)
			goto bulk_get_err;

		++bulk->count;
	}

	return 0;

bulk_get_err:
	err = clk_release_all(bulk->clks, bulk->count);
	if (err)
		debug("%s: could not release all clocks for %p\n",
		      __func__, dev);

	return ret;
}

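/*
 * clk_set_default_parents() walks the "assigned-clocks" /
 * "assigned-clock-parents" property pairs of @dev and reparents each
 * listed clock. At stage 0 (pre-probe) entries whose clock is provided
 * by @dev itself are skipped, since a provider cannot reparent its own
 * clocks before it has been probed; at a later stage only those
 * self-owned entries are processed, so nothing is set up twice.
 */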
static int clk_set_default_parents(struct udevice *dev, int stage)
{
	struct clk clk, parent_clk;
	int index;
	int num_parents;
	int ret;

	num_parents = dev_count_phandle_with_args(dev, "assigned-clock-parents",
						  "#clock-cells");
	if (num_parents < 0) {
		debug("%s: could not read assigned-clock-parents for %p\n",
		      __func__, dev);
		return 0;
	}

	for (index = 0; index < num_parents; index++) {
		ret = clk_get_by_indexed_prop(dev, "assigned-clock-parents",
					      index, &parent_clk);
		/* If -ENOENT, this is a no-op entry */
		if (ret == -ENOENT)
			continue;

		if (ret) {
			debug("%s: could not get parent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}

		/*
		 * This is a clk provider trying to reparent its own clock:
		 * that cannot be done now and must wait until after the
		 * device has been probed.
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not set up the parent clocks twice */
			continue;

		ret = clk_set_parent(&clk, &parent_clk);

		/*
		 * Not all drivers may support clock-reparenting (as of now).
		 * Ignore errors due to this.
		 */
		if (ret == -ENOSYS)
			continue;

		if (ret < 0) {
			debug("%s: failed to reparent clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			return ret;
		}
	}

	return 0;
}

static int clk_set_default_rates(struct udevice *dev, int stage)
{
	struct clk clk;
	int index;
	int num_rates;
	int size;
	int ret = 0;
	u32 *rates = NULL;

	size = dev_read_size(dev, "assigned-clock-rates");
	if (size < 0)
		return 0;

	num_rates = size / sizeof(u32);
	rates = calloc(num_rates, sizeof(u32));
	if (!rates)
		return -ENOMEM;

	ret = dev_read_u32_array(dev, "assigned-clock-rates", rates, num_rates);
	if (ret)
		goto fail;

	for (index = 0; index < num_rates; index++) {
		/* If 0 is passed, this is a no-op */
		if (!rates[index])
			continue;

		ret = clk_get_by_indexed_prop(dev, "assigned-clocks",
					      index, &clk);
		if (ret) {
			debug("%s: could not get assigned clock %d for %s\n",
			      __func__, index, dev_read_name(dev));
			continue;
		}

		/*
		 * This is a clk provider trying to program its own clock:
		 * that cannot be done now and must wait until after the
		 * device has been probed.
		 */
		if (stage == 0 && clk.dev == dev)
			continue;

		if (stage > 0 && clk.dev != dev)
			/* do not set up the rates twice */
			continue;

		ret = clk_set_rate(&clk, rates[index]);

		if (ret < 0) {
			debug("%s: failed to set rate on clock index %d (%ld) for %s\n",
			      __func__, index, clk.id, dev_read_name(dev));
			break;
		}
	}

fail:
	free(rates);
	return ret;
}

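/*
 * A minimal device-tree sketch of the properties consumed above (the node,
 * labels and values are hypothetical, for illustration only):
 *
 *	uart0: serial@1000 {
 *		clocks = <&ccu 13>;
 *		assigned-clocks = <&ccu 13>;
 *		assigned-clock-parents = <&ccu 2>;
 *		assigned-clock-rates = <48000000>;
 *	};
 */
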
int clk_set_defaults(struct udevice *dev, int stage)
{
	int ret;

	if (!dev_of_valid(dev))
		return 0;

	/*
	 * If this is neither the SPL build nor the post-relocation phase,
	 * don't take any action.
	 */
	if (!(IS_ENABLED(CONFIG_SPL_BUILD) || (gd->flags & GD_FLG_RELOC)))
		return 0;

	debug("%s(%s)\n", __func__, dev_read_name(dev));

	ret = clk_set_default_parents(dev, stage);
	if (ret)
		return ret;

	ret = clk_set_default_rates(dev, stage);
	if (ret < 0)
		return ret;

	return 0;
}

int clk_get_by_name(struct udevice *dev, const char *name, struct clk *clk)
{
	int index;

	debug("%s(dev=%p, name=%s, clk=%p)\n", __func__, dev, name, clk);
	clk->dev = NULL;

	index = dev_read_stringlist_search(dev, "clock-names", name);
	if (index < 0) {
		debug("fdt_stringlist_search() failed: %d\n", index);
		return index;
	}

	return clk_get_by_index(dev, index, clk);
}
# endif /* OF_PLATDATA */

int clk_get_by_name_nodev(ofnode node, const char *name, struct clk *clk)
{
	int index;

	debug("%s(node=%s, name=%s, clk=%p)\n", __func__,
	      ofnode_get_name(node), name, clk);
	clk->dev = NULL;

	index = ofnode_stringlist_search(node, "clock-names", name);
	if (index < 0) {
		debug("fdt_stringlist_search() failed: %d\n", index);
		return index;
	}

	return clk_get_by_index_nodev(node, index, clk);
}

int clk_get_optional_nodev(ofnode node, const char *name, struct clk *clk)
{
	int ret;

	ret = clk_get_by_name_nodev(node, name, clk);
	if (ret == -ENODATA)
		return 0;

	return ret;
}

int clk_release_all(struct clk *clk, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		debug("%s(clk[%d]=%p)\n", __func__, i, &clk[i]);

		/* check if clock has been previously requested */
		if (!clk[i].dev)
			continue;

		ret = clk_disable(&clk[i]);
		if (ret && ret != -ENOSYS)
			return ret;

		ret = clk_free(&clk[i]);
		if (ret && ret != -ENOSYS)
			return ret;
	}

	return 0;
}
#endif /* OF_CONTROL */

int clk_request(struct udevice *dev, struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(dev=%p, clk=%p)\n", __func__, dev, clk);
	if (!clk)
		return 0;
	ops = clk_dev_ops(dev);

	clk->dev = dev;

	if (!ops->request)
		return 0;

	return ops->request(clk);
}

int clk_free(struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->rfree)
		return 0;

	return ops->rfree(clk);
}

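/*
 * The wrappers below dispatch to the provider driver's clk_ops. An
 * invalid clk is treated as a no-op, and a missing op is reported as
 * -ENOSYS so callers can treat "not implemented" as non-fatal.
 */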
ulong clk_get_rate(struct clk *clk)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->get_rate)
		return -ENOSYS;

	return ops->get_rate(clk);
}

struct clk *clk_get_parent(struct clk *clk)
{
	struct udevice *pdev;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return NULL;

	pdev = dev_get_parent(clk->dev);
	pclk = dev_get_clk_ptr(pdev);
	if (!pclk)
		return ERR_PTR(-ENODEV);

	return pclk;
}

long long clk_get_parent_rate(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *pclk;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;

	pclk = clk_get_parent(clk);
	if (IS_ERR(pclk))
		return -ENODEV;

	ops = clk_dev_ops(pclk->dev);
	if (!ops->get_rate)
		return -ENOSYS;

	/* Read the 'rate' if not already set or if CLK_GET_RATE_NOCACHE is set */
	if (!pclk->rate || pclk->flags & CLK_GET_RATE_NOCACHE)
		pclk->rate = clk_get_rate(pclk);

	return pclk->rate;
}

ulong clk_set_rate(struct clk *clk, ulong rate)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p, rate=%lu)\n", __func__, clk, rate);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->set_rate)
		return -ENOSYS;

	return ops->set_rate(clk, rate);
}

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	const struct clk_ops *ops;

	debug("%s(clk=%p, parent=%p)\n", __func__, clk, parent);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (!ops->set_parent)
		return -ENOSYS;

	return ops->set_parent(clk, parent);
}

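/*
 * With CLK_CCF enabled, clk_enable()/clk_disable() keep a per-clock
 * enable_count: a clock is only enabled on the 0 -> 1 transition (after
 * first enabling its parent, if that parent is itself a clock device)
 * and only disabled again when the count drops back to 0. Without CCF
 * the calls go straight to the provider's enable/disable ops.
 */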
int clk_enable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		/* Take id 0 as an invalid clk, such as a dummy clock */
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			if (clkp->enable_count) {
				clkp->enable_count++;
				return 0;
			}
			if (clkp->dev->parent &&
			    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
				ret = clk_enable(dev_get_clk_ptr(clkp->dev->parent));
				if (ret) {
					printf("Enable %s failed\n",
					       clkp->dev->parent->name);
					return ret;
				}
			}
		}

		if (ops->enable) {
			ret = ops->enable(clk);
			if (ret) {
				printf("Enable %s failed\n", clk->dev->name);
				return ret;
			}
		}
		if (clkp)
			clkp->enable_count++;
	} else {
		if (!ops->enable)
			return -ENOSYS;
		return ops->enable(clk);
	}

	return 0;
}

int clk_enable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_enable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

int clk_disable(struct clk *clk)
{
	const struct clk_ops *ops;
	struct clk *clkp = NULL;
	int ret;

	debug("%s(clk=%p)\n", __func__, clk);
	if (!clk_valid(clk))
		return 0;
	ops = clk_dev_ops(clk->dev);

	if (CONFIG_IS_ENABLED(CLK_CCF)) {
		if (clk->id && !clk_get_by_id(clk->id, &clkp)) {
			if (clkp->enable_count == 0) {
				printf("clk %s already disabled\n",
				       clkp->dev->name);
				return 0;
			}

			if (--clkp->enable_count > 0)
				return 0;
		}

		if (ops->disable) {
			ret = ops->disable(clk);
			if (ret)
				return ret;
		}

		if (clkp && clkp->dev->parent &&
		    device_get_uclass_id(clkp->dev) == UCLASS_CLK) {
			ret = clk_disable(dev_get_clk_ptr(clkp->dev->parent));
			if (ret) {
				printf("Disable %s failed\n",
				       clkp->dev->parent->name);
				return ret;
			}
		}
	} else {
		if (!ops->disable)
			return -ENOSYS;

		return ops->disable(clk);
	}

	return 0;
}

int clk_disable_bulk(struct clk_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = clk_disable(&bulk->clks[i]);
		if (ret < 0 && ret != -ENOSYS)
			return ret;
	}

	return 0;
}

int clk_get_by_id(ulong id, struct clk **clkp)
{
	struct udevice *dev;
	struct uclass *uc;
	int ret;

	ret = uclass_get(UCLASS_CLK, &uc);
	if (ret)
		return ret;

	uclass_foreach_dev(dev, uc) {
		struct clk *clk = dev_get_clk_ptr(dev);

		if (clk && clk->id == id) {
			*clkp = clk;
			return 0;
		}
	}

	return -ENOENT;
}

bool clk_is_match(const struct clk *p, const struct clk *q)
{
	/* trivial case: identical struct clk's or both NULL */
	if (p == q)
		return true;

	/* trivial case #2: only one of the clk pointers is NULL */
	if (!p || !q)
		return false;

	/* same device, id and data */
	if (p->dev == q->dev && p->id == q->id && p->data == q->data)
		return true;

	return false;
}

static void devm_clk_release(struct udevice *dev, void *res)
{
	clk_free(res);
}

static int devm_clk_match(struct udevice *dev, void *res, void *data)
{
	return res == data;
}

struct clk *devm_clk_get(struct udevice *dev, const char *id)
{
	int rc;
	struct clk *clk;

	clk = devres_alloc(devm_clk_release, sizeof(struct clk), __GFP_ZERO);
	if (unlikely(!clk))
		return ERR_PTR(-ENOMEM);

	rc = clk_get_by_name(dev, id, clk);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, clk);
	return clk;
}

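/*
 * devm_clk_get() is the managed variant of clk_get_by_name(): the clk is
 * tied to @dev through devres and is released automatically (via
 * clk_free()) when the device goes away, so callers need not call
 * devm_clk_put() on the success path. Errors come back as ERR_PTR()
 * values and should be checked with IS_ERR().
 */
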
struct clk *devm_clk_get_optional(struct udevice *dev, const char *id)
{
	struct clk *clk = devm_clk_get(dev, id);

	if (PTR_ERR(clk) == -ENODATA)
		return NULL;

	return clk;
}

void devm_clk_put(struct udevice *dev, struct clk *clk)
{
	int rc;

	if (!clk)
		return;

	rc = devres_release(dev, devm_clk_release, devm_clk_match, clk);
	WARN_ON(rc);
}

int clk_uclass_post_probe(struct udevice *dev)
{
	/*
	 * Call clk_set_defaults() again once a clock provider has been
	 * probed. This takes care of cases where the DT uses
	 * assigned-clocks to set up default parents and rates on the
	 * provider's own node, which cannot be handled before probe.
	 */
	clk_set_defaults(dev, 1);

	return 0;
}

UCLASS_DRIVER(clk) = {
	.id		= UCLASS_CLK,
	.name		= "clk",
	.post_probe	= clk_uclass_post_probe,
};