of.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic OPP OF helpers
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/energy_model.h>

#include "opp.h"

/*
 * Returns opp descriptor node for a device node, caller must
 * do of_node_put().
 */
static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
						     int index)
{
	/* "operating-points-v2" can be an array for power domain providers */
	return of_parse_phandle(np, "operating-points-v2", index);
}

/* Returns opp descriptor node for a device, caller must do of_node_put() */
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
{
	return _opp_of_get_opp_desc_node(dev->of_node, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
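
/*
 * Illustrative usage sketch (not part of the original file): a consumer driver
 * can look up its "operating-points-v2" table node with
 * dev_pm_opp_of_get_opp_desc_node() and must drop the reference once done.
 * "dev" is assumed to be the driver's struct device.
 *
 *	struct device_node *opp_np = dev_pm_opp_of_get_opp_desc_node(dev);
 *
 *	if (opp_np) {
 *		bool shared = of_property_read_bool(opp_np, "opp-shared");
 *
 *		dev_info(dev, "OPP table %pOF (shared: %d)\n", opp_np, shared);
 *		of_node_put(opp_np);
 *	}
 */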

struct opp_table *_managed_opp(struct device *dev, int index)
{
	struct opp_table *opp_table, *managed_table = NULL;
	struct device_node *np;

	np = _opp_of_get_opp_desc_node(dev->of_node, index);
	if (!np)
		return NULL;

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (opp_table->np == np) {
			/*
			 * Multiple devices can point to the same OPP table and
			 * so will have same node-pointer, np.
			 *
			 * But the OPPs will be considered as shared only if the
			 * OPP table contains a "opp-shared" property.
			 */
			if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
				_get_opp_table_kref(opp_table);
				managed_table = opp_table;
			}

			break;
		}
	}

	of_node_put(np);

	return managed_table;
}

/* The caller must call dev_pm_opp_put() after the OPP is used */
static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
					  struct device_node *opp_np)
{
	struct dev_pm_opp *opp;

	mutex_lock(&opp_table->lock);

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		if (opp->np == opp_np) {
			dev_pm_opp_get(opp);
			mutex_unlock(&opp_table->lock);
			return opp;
		}
	}

	mutex_unlock(&opp_table->lock);

	return NULL;
}

static struct device_node *of_parse_required_opp(struct device_node *np,
						 int index)
{
	return of_parse_phandle(np, "required-opps", index);
}

/* The caller must call dev_pm_opp_put_opp_table() after the table is used */
static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
{
	struct opp_table *opp_table;
	struct device_node *opp_table_np;

	lockdep_assert_held(&opp_table_lock);

	opp_table_np = of_get_parent(opp_np);
	if (!opp_table_np)
		goto err;

	/* It is safe to put the node now as all we need now is its address */
	of_node_put(opp_table_np);

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (opp_table_np == opp_table->np) {
			_get_opp_table_kref(opp_table);
			return opp_table;
		}
	}

err:
	return ERR_PTR(-ENODEV);
}

/* Free resources previously acquired by _opp_table_alloc_required_tables() */
static void _opp_table_free_required_tables(struct opp_table *opp_table)
{
	struct opp_table **required_opp_tables = opp_table->required_opp_tables;
	int i;

	if (!required_opp_tables)
		return;

	for (i = 0; i < opp_table->required_opp_count; i++) {
		if (IS_ERR_OR_NULL(required_opp_tables[i]))
			break;

		dev_pm_opp_put_opp_table(required_opp_tables[i]);
	}

	kfree(required_opp_tables);

	opp_table->required_opp_count = 0;
	opp_table->required_opp_tables = NULL;
}

/*
 * Populate all devices and opp tables which are part of "required-opps" list.
 * Checking only the first OPP node should be enough.
 */
static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
					     struct device *dev,
					     struct device_node *opp_np)
{
	struct opp_table **required_opp_tables;
	struct device_node *required_np, *np;
	int count, i;

	/* Traversing the first OPP node is all we need */
	np = of_get_next_available_child(opp_np, NULL);
	if (!np) {
		dev_err(dev, "Empty OPP table\n");
		return;
	}

	count = of_count_phandle_with_args(np, "required-opps", NULL);
	if (!count)
		goto put_np;

	required_opp_tables = kcalloc(count, sizeof(*required_opp_tables),
				      GFP_KERNEL);
	if (!required_opp_tables)
		goto put_np;

	opp_table->required_opp_tables = required_opp_tables;
	opp_table->required_opp_count = count;

	for (i = 0; i < count; i++) {
		required_np = of_parse_required_opp(np, i);
		if (!required_np)
			goto free_required_tables;

		required_opp_tables[i] = _find_table_of_opp_np(required_np);
		if (IS_ERR(required_opp_tables[i])) {
			of_node_put(required_np);
			goto free_required_tables;
		}

		/*
		 * We only support genpd's OPPs in the "required-opps" for now,
		 * as we don't know much about other cases. Error out if the
		 * required OPP doesn't belong to a genpd.
		 */
		if (!required_opp_tables[i]->is_genpd) {
			dev_err(dev, "required-opp doesn't belong to genpd: %pOF\n",
				required_np);
			of_node_put(required_np);
			goto free_required_tables;
		}

		/* Put the node only after it was used for error reporting */
		of_node_put(required_np);
	}

	goto put_np;

free_required_tables:
	_opp_table_free_required_tables(opp_table);
put_np:
	of_node_put(np);
}

void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
			int index)
{
	struct device_node *np, *opp_np;
	u32 val;

	/*
	 * Only required for backward compatibility with v1 bindings, but isn't
	 * harmful for other cases. And so we do it unconditionally.
	 */
	np = of_node_get(dev->of_node);
	if (!np)
		return;

	if (!of_property_read_u32(np, "clock-latency", &val))
		opp_table->clock_latency_ns_max = val;
	of_property_read_u32(np, "voltage-tolerance",
			     &opp_table->voltage_tolerance_v1);

	if (of_find_property(np, "#power-domain-cells", NULL))
		opp_table->is_genpd = true;

	/* Get OPP table node */
	opp_np = _opp_of_get_opp_desc_node(np, index);
	of_node_put(np);

	if (!opp_np)
		return;

	if (of_property_read_bool(opp_np, "opp-shared"))
		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
	else
		opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;

	opp_table->np = opp_np;

	_opp_table_alloc_required_tables(opp_table, dev, opp_np);
	of_node_put(opp_np);
}

void _of_clear_opp_table(struct opp_table *opp_table)
{
	_opp_table_free_required_tables(opp_table);
}

/*
 * Release all resources previously acquired with a call to
 * _of_opp_alloc_required_opps().
 */
void _of_opp_free_required_opps(struct opp_table *opp_table,
				struct dev_pm_opp *opp)
{
	struct dev_pm_opp **required_opps = opp->required_opps;
	int i;

	if (!required_opps)
		return;

	for (i = 0; i < opp_table->required_opp_count; i++) {
		if (!required_opps[i])
			break;

		/* Put the reference back */
		dev_pm_opp_put(required_opps[i]);
	}

	kfree(required_opps);
	opp->required_opps = NULL;
}

/* Populate all required OPPs which are part of "required-opps" list */
static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
				       struct dev_pm_opp *opp)
{
	struct dev_pm_opp **required_opps;
	struct opp_table *required_table;
	struct device_node *np;
	int i, ret, count = opp_table->required_opp_count;

	if (!count)
		return 0;

	required_opps = kcalloc(count, sizeof(*required_opps), GFP_KERNEL);
	if (!required_opps)
		return -ENOMEM;

	opp->required_opps = required_opps;

	for (i = 0; i < count; i++) {
		required_table = opp_table->required_opp_tables[i];

		np = of_parse_required_opp(opp->np, i);
		if (unlikely(!np)) {
			ret = -ENODEV;
			goto free_required_opps;
		}

		required_opps[i] = _find_opp_of_np(required_table, np);
		of_node_put(np);

		if (!required_opps[i]) {
			pr_err("%s: Unable to find required OPP node: %pOF (%d)\n",
			       __func__, opp->np, i);
			ret = -ENODEV;
			goto free_required_opps;
		}
	}

	return 0;

free_required_opps:
	_of_opp_free_required_opps(opp_table, opp);

	return ret;
}

static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
{
	struct device_node *np, *opp_np;
	struct property *prop;

	if (!opp_table) {
		np = of_node_get(dev->of_node);
		if (!np)
			return -ENODEV;

		opp_np = _opp_of_get_opp_desc_node(np, 0);
		of_node_put(np);
	} else {
		opp_np = of_node_get(opp_table->np);
	}

	/* Let's not fail in case we are parsing opp-v1 bindings */
	if (!opp_np)
		return 0;

	/* Checking only first OPP is sufficient */
	np = of_get_next_available_child(opp_np, NULL);
	of_node_put(opp_np);
	if (!np) {
		dev_err(dev, "OPP table empty\n");
		return -EINVAL;
	}

	prop = of_find_property(np, "opp-peak-kBps", NULL);
	of_node_put(np);

	if (!prop || !prop->length)
		return 0;

	return 1;
}

int dev_pm_opp_of_find_icc_paths(struct device *dev,
				 struct opp_table *opp_table)
{
	struct device_node *np;
	int ret, i, count, num_paths;
	struct icc_path **paths;

	ret = _bandwidth_supported(dev, opp_table);
	if (ret <= 0)
		return ret;

	ret = 0;

	np = of_node_get(dev->of_node);
	if (!np)
		return 0;

	count = of_count_phandle_with_args(np, "interconnects",
					   "#interconnect-cells");
	of_node_put(np);
	if (count < 0)
		return 0;

	/* two phandles when #interconnect-cells = <1> */
	if (count % 2) {
		dev_err(dev, "%s: Invalid interconnects values\n", __func__);
		return -EINVAL;
	}

	num_paths = count / 2;
	paths = kcalloc(num_paths, sizeof(*paths), GFP_KERNEL);
	if (!paths)
		return -ENOMEM;

	for (i = 0; i < num_paths; i++) {
		paths[i] = of_icc_get_by_index(dev, i);
		if (IS_ERR(paths[i])) {
			ret = PTR_ERR(paths[i]);
			if (ret != -EPROBE_DEFER) {
				dev_err(dev, "%s: Unable to get path%d: %d\n",
					__func__, i, ret);
			}
			goto err;
		}
	}

	if (opp_table) {
		opp_table->paths = paths;
		opp_table->path_count = num_paths;
		return 0;
	}

err:
	while (i--)
		icc_put(paths[i]);

	kfree(paths);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_find_icc_paths);
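
/*
 * Illustrative usage sketch (an assumption, not taken from this file): a driver
 * that scales interconnect bandwidth through its OPP table can validate the
 * "interconnects" paths early in probe. Passing a NULL opp_table only checks
 * that the paths can be acquired (and releases them again) without storing
 * them, which is useful for probe deferral:
 *
 *	ret = dev_pm_opp_of_find_icc_paths(dev, NULL);
 *	if (ret)
 *		return ret;	(may be -EPROBE_DEFER)
 */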

static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
			      struct device_node *np)
{
	unsigned int levels = opp_table->supported_hw_count;
	int count, versions, ret, i, j;
	u32 val;

	if (!opp_table->supported_hw) {
		/*
		 * In the case that no supported_hw has been set by the
		 * platform but there is an opp-supported-hw value set for
		 * an OPP then the OPP should not be enabled as there is
		 * no way to see if the hardware supports it.
		 */
		if (of_find_property(np, "opp-supported-hw", NULL))
			return false;
		else
			return true;
	}

	count = of_property_count_u32_elems(np, "opp-supported-hw");
	if (count <= 0 || count % levels) {
		dev_err(dev, "%s: Invalid opp-supported-hw property (%d)\n",
			__func__, count);
		return false;
	}

	versions = count / levels;

	/* All levels in at least one of the versions should match */
	for (i = 0; i < versions; i++) {
		bool supported = true;

		for (j = 0; j < levels; j++) {
			ret = of_property_read_u32_index(np, "opp-supported-hw",
							 i * levels + j, &val);
			if (ret) {
				dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
					 __func__, i * levels + j, ret);
				return false;
			}

			/* Check if the level is supported */
			if (!(val & opp_table->supported_hw[j])) {
				supported = false;
				break;
			}
		}

		if (supported)
			return true;
	}

	return false;
}

static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
			      struct opp_table *opp_table)
{
	u32 *microvolt, *microamp = NULL;
	int supplies = opp_table->regulator_count, vcount, icount, ret, i, j;
	struct property *prop = NULL;
	char name[NAME_MAX];

	/* Search for "opp-microvolt-<name>" */
	if (opp_table->prop_name) {
		snprintf(name, sizeof(name), "opp-microvolt-%s",
			 opp_table->prop_name);
		prop = of_find_property(opp->np, name, NULL);
	}

	if (!prop) {
		/* Search for "opp-microvolt" */
		sprintf(name, "opp-microvolt");
		prop = of_find_property(opp->np, name, NULL);

		/* Missing property isn't a problem, but an invalid entry is */
		if (!prop) {
			if (unlikely(supplies == -1)) {
				/* Initialize regulator_count */
				opp_table->regulator_count = 0;
				return 0;
			}

			if (!supplies)
				return 0;

			dev_err(dev, "%s: opp-microvolt missing although OPP is managing regulators\n",
				__func__);
			return -EINVAL;
		}
	}

	if (unlikely(supplies == -1)) {
		/* Initialize regulator_count */
		supplies = opp_table->regulator_count = 1;
	} else if (unlikely(!supplies)) {
		dev_err(dev, "%s: opp-microvolt wasn't expected\n", __func__);
		return -EINVAL;
	}

	vcount = of_property_count_u32_elems(opp->np, name);
	if (vcount < 0) {
		dev_err(dev, "%s: Invalid %s property (%d)\n",
			__func__, name, vcount);
		return vcount;
	}

	/* There can be one or three elements per supply */
	if (vcount != supplies && vcount != supplies * 3) {
		dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
			__func__, name, vcount, supplies);
		return -EINVAL;
	}

	microvolt = kmalloc_array(vcount, sizeof(*microvolt), GFP_KERNEL);
	if (!microvolt)
		return -ENOMEM;

	ret = of_property_read_u32_array(opp->np, name, microvolt, vcount);
	if (ret) {
		dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
		ret = -EINVAL;
		goto free_microvolt;
	}

	/* Search for "opp-microamp-<name>" */
	prop = NULL;
	if (opp_table->prop_name) {
		snprintf(name, sizeof(name), "opp-microamp-%s",
			 opp_table->prop_name);
		prop = of_find_property(opp->np, name, NULL);
	}

	if (!prop) {
		/* Search for "opp-microamp" */
		sprintf(name, "opp-microamp");
		prop = of_find_property(opp->np, name, NULL);
	}

	if (prop) {
		icount = of_property_count_u32_elems(opp->np, name);
		if (icount < 0) {
			dev_err(dev, "%s: Invalid %s property (%d)\n", __func__,
				name, icount);
			ret = icount;
			goto free_microvolt;
		}

		if (icount != supplies) {
			dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
				__func__, name, icount, supplies);
			ret = -EINVAL;
			goto free_microvolt;
		}

		microamp = kmalloc_array(icount, sizeof(*microamp), GFP_KERNEL);
		if (!microamp) {
			/* Allocation failure, not a parse error */
			ret = -ENOMEM;
			goto free_microvolt;
		}

		ret = of_property_read_u32_array(opp->np, name, microamp,
						 icount);
		if (ret) {
			dev_err(dev, "%s: error parsing %s: %d\n", __func__,
				name, ret);
			ret = -EINVAL;
			goto free_microamp;
		}
	}

	for (i = 0, j = 0; i < supplies; i++) {
		opp->supplies[i].u_volt = microvolt[j++];

		if (vcount == supplies) {
			opp->supplies[i].u_volt_min = opp->supplies[i].u_volt;
			opp->supplies[i].u_volt_max = opp->supplies[i].u_volt;
		} else {
			opp->supplies[i].u_volt_min = microvolt[j++];
			opp->supplies[i].u_volt_max = microvolt[j++];
		}

		if (microamp)
			opp->supplies[i].u_amp = microamp[i];
	}

free_microamp:
	kfree(microamp);
free_microvolt:
	kfree(microvolt);

	return ret;
}

/**
 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
 *				  entries
 * @dev:	device pointer used to lookup OPP table.
 *
 * Free OPPs created using static entries present in DT.
 */
void dev_pm_opp_of_remove_table(struct device *dev)
{
	dev_pm_opp_remove_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);

static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table,
		    struct device_node *np, bool peak)
{
	const char *name = peak ? "opp-peak-kBps" : "opp-avg-kBps";
	struct property *prop;
	int i, count, ret;
	u32 *bw;

	prop = of_find_property(np, name, NULL);
	if (!prop)
		return -ENODEV;

	count = prop->length / sizeof(u32);
	if (table->path_count != count) {
		pr_err("%s: Mismatch between %s and paths (%d %d)\n",
		       __func__, name, count, table->path_count);
		return -EINVAL;
	}

	bw = kmalloc_array(count, sizeof(*bw), GFP_KERNEL);
	if (!bw)
		return -ENOMEM;

	ret = of_property_read_u32_array(np, name, bw, count);
	if (ret) {
		pr_err("%s: Error parsing %s: %d\n", __func__, name, ret);
		goto out;
	}

	for (i = 0; i < count; i++) {
		if (peak)
			new_opp->bandwidth[i].peak = kBps_to_icc(bw[i]);
		else
			new_opp->bandwidth[i].avg = kBps_to_icc(bw[i]);
	}

out:
	kfree(bw);
	return ret;
}

static int _read_opp_key(struct dev_pm_opp *new_opp, struct opp_table *table,
			 struct device_node *np, bool *rate_not_available)
{
	bool found = false;
	u64 rate;
	int ret;

	ret = of_property_read_u64(np, "opp-hz", &rate);
	if (!ret) {
		/*
		 * Rate is defined as an unsigned long in clk API, and so
		 * casting explicitly to its type. Must be fixed once rate is 64
		 * bit guaranteed in clk API.
		 */
		new_opp->rate = (unsigned long)rate;
		found = true;
	}
	*rate_not_available = !!ret;

	/*
	 * Bandwidth consists of peak and average (optional) values:
	 * opp-peak-kBps = <path1_value path2_value>;
	 * opp-avg-kBps = <path1_value path2_value>;
	 */
	ret = _read_bw(new_opp, table, np, true);
	if (!ret) {
		found = true;
		ret = _read_bw(new_opp, table, np, false);
	}

	/* The properties were found but we failed to parse them */
	if (ret && ret != -ENODEV)
		return ret;

	if (!of_property_read_u32(np, "opp-level", &new_opp->level))
		found = true;

	if (found)
		return 0;

	return ret;
}

/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @opp_table:	OPP table
 * @dev:	device for which we do this operation
 * @np:		device node
 *
 * This function adds an opp definition to the opp table and returns status. The
 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
 * removed by dev_pm_opp_remove.
 *
 * Return:
 * Valid OPP pointer:
 *		On success
 * NULL:
 *		Duplicate OPP (same frequency and voltage) and opp->available,
 *		OR if the OPP is not supported by hardware.
 * ERR_PTR(-EEXIST):
 *		Frequency is the same but voltage differs, OR
 *		duplicate OPP (same frequency and voltage) and !opp->available
 * ERR_PTR(-ENOMEM):
 *		Memory allocation failure
 * ERR_PTR(-EINVAL):
 *		Failed parsing the OPP node
 */
static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
		struct device *dev, struct device_node *np)
{
	struct dev_pm_opp *new_opp;
	u32 val;
	int ret;
	bool rate_not_available = false;

	new_opp = _opp_allocate(opp_table);
	if (!new_opp)
		return ERR_PTR(-ENOMEM);

	ret = _read_opp_key(new_opp, opp_table, np, &rate_not_available);
	if (ret < 0 && !opp_table->is_genpd) {
		dev_err(dev, "%s: opp key field not found\n", __func__);
		goto free_opp;
	}

	/* Check if the OPP supports hardware's hierarchy of versions or not */
	if (!_opp_is_supported(dev, opp_table, np)) {
		dev_dbg(dev, "OPP not supported by hardware: %lu\n",
			new_opp->rate);
		goto free_opp;
	}

	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	new_opp->np = np;
	new_opp->dynamic = false;
	new_opp->available = true;

	ret = _of_opp_alloc_required_opps(opp_table, new_opp);
	if (ret)
		goto free_opp;

	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	ret = opp_parse_supplies(new_opp, dev, opp_table);
	if (ret)
		goto free_required_opps;

	if (opp_table->is_genpd)
		new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp);

	ret = _opp_add(dev, new_opp, opp_table, rate_not_available);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
		goto free_required_opps;
	}

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (opp_table->suspend_opp) {
			/* Pick the OPP with higher rate as suspend OPP */
			if (new_opp->rate > opp_table->suspend_opp->rate) {
				opp_table->suspend_opp->suspend = false;
				new_opp->suspend = true;
				opp_table->suspend_opp = new_opp;
			}
		} else {
			new_opp->suspend = true;
			opp_table->suspend_opp = new_opp;
		}
	}

	if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
		opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
		 __func__, new_opp->turbo, new_opp->rate,
		 new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
		 new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
	return new_opp;

free_required_opps:
	_of_opp_free_required_opps(opp_table, new_opp);
free_opp:
	_opp_free(new_opp);

	return ret ? ERR_PTR(ret) : NULL;
}

/* Initializes OPP tables based on new bindings */
static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
{
	struct device_node *np;
	int ret, count = 0;
	struct dev_pm_opp *opp;

	/* OPP table is already initialized for the device */
	mutex_lock(&opp_table->lock);
	if (opp_table->parsed_static_opps) {
		opp_table->parsed_static_opps++;
		mutex_unlock(&opp_table->lock);
		return 0;
	}

	opp_table->parsed_static_opps = 1;
	mutex_unlock(&opp_table->lock);

	/* We have opp-table node now, iterate over it and add OPPs */
	for_each_available_child_of_node(opp_table->np, np) {
		opp = _opp_add_static_v2(opp_table, dev, np);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
				ret);
			of_node_put(np);
			goto remove_static_opp;
		} else if (opp) {
			count++;
		}
	}

	/* There should be one or more OPPs defined */
	if (!count) {
		dev_err(dev, "%s: no supported OPPs\n", __func__);
		ret = -ENOENT;
		goto remove_static_opp;
	}

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		/* Any non-zero performance state would enable the feature */
		if (opp->pstate) {
			opp_table->genpd_performance_state = true;
			break;
		}
	}

	return 0;

remove_static_opp:
	_opp_remove_all_static(opp_table);

	return ret;
}

/* Initializes OPP tables based on old-deprecated bindings */
static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
{
	const struct property *prop;
	const __be32 *val;
	int nr, ret = 0;

	mutex_lock(&opp_table->lock);
	if (opp_table->parsed_static_opps) {
		opp_table->parsed_static_opps++;
		mutex_unlock(&opp_table->lock);
		return 0;
	}

	opp_table->parsed_static_opps = 1;
	mutex_unlock(&opp_table->lock);

	prop = of_find_property(dev->of_node, "operating-points", NULL);
	if (!prop) {
		ret = -ENODEV;
		goto remove_static_opp;
	}

	if (!prop->value) {
		ret = -ENODATA;
		goto remove_static_opp;
	}

	/*
	 * Each OPP is a set of tuples consisting of frequency and
	 * voltage like <freq-kHz vol-uV>.
	 */
	nr = prop->length / sizeof(u32);
	if (nr % 2) {
		dev_err(dev, "%s: Invalid OPP table\n", __func__);
		ret = -EINVAL;
		goto remove_static_opp;
	}

	val = prop->value;
	while (nr) {
		unsigned long freq = be32_to_cpup(val++) * 1000;
		unsigned long volt = be32_to_cpup(val++);

		ret = _opp_add_v1(opp_table, dev, freq, volt, false);
		if (ret) {
			dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
				__func__, freq, ret);
			goto remove_static_opp;
		}
		nr -= 2;
	}

	return 0;

remove_static_opp:
	_opp_remove_all_static(opp_table);

	return ret;
}

/**
 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
 * @dev:	device pointer used to lookup OPP table.
 *
 * Register the initial OPP table with the OPP library for given device.
 *
 * Return:
 * 0		On success OR
 *		duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq is the same but volt is different, OR
 *		duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or contains
 *		invalid data in device node.
 * -ENODATA	when empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in opp-v2 table
 */
int dev_pm_opp_of_add_table(struct device *dev)
{
	struct opp_table *opp_table;
	int ret;

	opp_table = dev_pm_opp_get_opp_table_indexed(dev, 0);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/*
	 * OPPs have two versions of bindings now. Also try the old (v1)
	 * bindings for backward compatibility with older dtbs.
	 */
	if (opp_table->np)
		ret = _of_add_opp_table_v2(dev, opp_table);
	else
		ret = _of_add_opp_table_v1(dev, opp_table);

	if (ret)
		dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
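
/*
 * Illustrative probe-time usage (a sketch under assumptions, not taken from
 * this file): a typical consumer parses the static table once during probe and
 * removes it again on the error/remove path.
 *
 *	ret = dev_pm_opp_of_add_table(dev);
 *	if (ret) {
 *		dev_err(dev, "failed to parse OPP table: %d\n", ret);
 *		return ret;
 *	}
 *
 *	... use dev_pm_opp_find_freq_ceil() / dev_pm_opp_set_rate() ...
 *
 *	dev_pm_opp_of_remove_table(dev);	(error/remove path)
 */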

/**
 * dev_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
 * @dev:	device pointer used to lookup OPP table.
 * @index:	Index number.
 *
 * Register the initial OPP table with the OPP library for given device only
 * using the "operating-points-v2" property.
 *
 * Return:
 * 0		On success OR
 *		duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq is the same but volt is different, OR
 *		duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -ENODEV	when 'operating-points' property is not found or contains
 *		invalid data in device node.
 * -ENODATA	when empty 'operating-points' property is found
 * -EINVAL	when invalid entries are found in opp-v2 table
 */
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
	struct opp_table *opp_table;
	int ret, count;

	if (index) {
		/*
		 * If only one phandle is present, then the same OPP table
		 * applies for all index requests.
		 */
		count = of_count_phandle_with_args(dev->of_node,
						   "operating-points-v2", NULL);
		if (count == 1)
			index = 0;
	}

	opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	ret = _of_add_opp_table_v2(dev, opp_table);
	if (ret)
		dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
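
/*
 * Illustrative sketch (an assumption, not from this file): power-domain
 * providers may list several phandles in "operating-points-v2", one per
 * domain, each selected by index. A provider with two domains would parse the
 * second table with:
 *
 *	ret = dev_pm_opp_of_add_table_indexed(dev, 1);
 */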

/* CPU device specific helpers */

/**
 * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be removed
 *
 * This removes the OPP tables for CPUs present in the @cpumask.
 * This should be used only to remove static entries created from DT.
 */
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
	_dev_pm_opp_cpumask_remove_table(cpumask, -1);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);

/**
 * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be added.
 *
 * This adds the OPP tables for CPUs present in the @cpumask.
 */
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
{
	struct device *cpu_dev;
	int cpu, ret;

	if (WARN_ON(cpumask_empty(cpumask)))
		return -ENODEV;

	for_each_cpu(cpu, cpumask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			ret = -ENODEV;
			goto remove_table;
		}

		ret = dev_pm_opp_of_add_table(cpu_dev);
		if (ret) {
			/*
			 * OPP may get registered dynamically, don't print error
			 * message here.
			 */
			pr_debug("%s: couldn't find opp table for cpu:%d, %d\n",
				 __func__, cpu, ret);

			goto remove_table;
		}
	}

	return 0;

remove_table:
	/* Free all other OPPs */
	_dev_pm_opp_cpumask_remove_table(cpumask, cpu);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
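
/*
 * Illustrative usage sketch (not from the original file): a cpufreq driver can
 * populate the OPP tables for every CPU of its policy in one call and tear
 * them down symmetrically on the error/exit path. "policy" is assumed to be a
 * struct cpufreq_policy.
 *
 *	ret = dev_pm_opp_of_cpumask_add_table(policy->cpus);
 *	if (ret)
 *		return ret;
 *
 *	...
 *
 *	dev_pm_opp_of_cpumask_remove_table(policy->cpus);
 */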

/*
 * Works only for OPP v2 bindings.
 *
 * Returns -ENOENT if operating-points-v2 bindings aren't supported.
 */
/**
 * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
 *				      @cpu_dev using operating-points-v2
 *				      bindings.
 *
 * @cpu_dev:	CPU device for which we do this operation
 * @cpumask:	cpumask to update with information of sharing CPUs
 *
 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
 *
 * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
 */
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
				   struct cpumask *cpumask)
{
	struct device_node *np, *tmp_np, *cpu_np;
	int cpu, ret = 0;

	/* Get OPP descriptor node */
	np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
	if (!np) {
		dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
		return -ENOENT;
	}

	cpumask_set_cpu(cpu_dev->id, cpumask);

	/* OPPs are shared ? */
	if (!of_property_read_bool(np, "opp-shared"))
		goto put_cpu_node;

	for_each_possible_cpu(cpu) {
		if (cpu == cpu_dev->id)
			continue;

		cpu_np = of_cpu_device_node_get(cpu);
		if (!cpu_np) {
			dev_err(cpu_dev, "%s: failed to get cpu%d node\n",
				__func__, cpu);
			ret = -ENOENT;
			goto put_cpu_node;
		}

		/* Get OPP descriptor node */
		tmp_np = _opp_of_get_opp_desc_node(cpu_np, 0);
		if (!tmp_np) {
			pr_err("%pOF: Couldn't find opp node\n", cpu_np);
			of_node_put(cpu_np);
			ret = -ENOENT;
			goto put_cpu_node;
		}
		/* Put cpu_np only after it was used for error reporting */
		of_node_put(cpu_np);

		/* CPUs are sharing opp node */
		if (np == tmp_np)
			cpumask_set_cpu(cpu, cpumask);

		of_node_put(tmp_np);
	}

put_cpu_node:
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
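
/*
 * Illustrative usage sketch (an assumption): a cpufreq driver typically
 * combines this with the cpumask helpers above to mark all CPUs that share an
 * OPP table with the policy CPU.
 *
 *	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
 *	if (ret == -ENOENT)
 *		... fall back to a per-CPU table or platform data ...
 */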

/**
 * of_get_required_opp_performance_state() - Search for required OPP and return its performance state.
 * @np:		Node that contains the "required-opps" property.
 * @index:	Index of the phandle to parse.
 *
 * Returns the performance state of the OPP pointed out by the "required-opps"
 * property at @index in @np.
 *
 * Return: Zero or positive performance state on success, otherwise negative
 * value on errors.
 */
int of_get_required_opp_performance_state(struct device_node *np, int index)
{
	struct dev_pm_opp *opp;
	struct device_node *required_np;
	struct opp_table *opp_table;
	int pstate = -EINVAL;

	required_np = of_parse_required_opp(np, index);
	if (!required_np)
		return -ENODEV;

	opp_table = _find_table_of_opp_np(required_np);
	if (IS_ERR(opp_table)) {
		pr_err("%s: Failed to find required OPP table %pOF: %ld\n",
		       __func__, np, PTR_ERR(opp_table));
		goto put_required_np;
	}

	opp = _find_opp_of_np(opp_table, required_np);
	if (opp) {
		pstate = opp->pstate;
		dev_pm_opp_put(opp);
	}

	dev_pm_opp_put_opp_table(opp_table);

put_required_np:
	of_node_put(required_np);

	return pstate;
}
EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);
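
/*
 * Illustrative sketch (an assumption): a genpd provider can translate a
 * consumer OPP node "opp_np" into the performance state it must program, e.g.
 * while resolving a "required-opps" reference:
 *
 *	pstate = of_get_required_opp_performance_state(opp_np, 0);
 *	if (pstate < 0)
 *		return pstate;
 */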

/**
 * dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp
 * @opp:	opp for which the DT node has to be returned
 *
 * Return: DT node corresponding to the opp on success, NULL otherwise.
 *
 * The caller needs to put the node with of_node_put() after using it.
 */
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
{
	if (IS_ERR_OR_NULL(opp)) {
		pr_err("%s: Invalid parameters\n", __func__);
		return NULL;
	}

	return of_node_get(opp->np);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
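
/*
 * Illustrative sketch (an assumption): platform code can read additional
 * properties from the OPP node backing a dev_pm_opp. The property name below
 * is purely hypothetical.
 *
 *	struct device_node *np = dev_pm_opp_get_of_node(opp);
 *
 *	if (np) {
 *		of_property_read_u32(np, "vendor,example-property", &val);
 *		of_node_put(np);
 *	}
 */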

/*
 * Callback function provided to the Energy Model framework upon registration.
 * This computes the power estimated by @dev at @kHz if it is the frequency
 * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
 * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
 * frequency and @mW to the associated power. The power is estimated as
 * P = C * V^2 * f with C being the device's capacitance and V and f
 * respectively the voltage and frequency of the OPP.
 *
 * Returns -EINVAL if the power calculation failed because of missing
 * parameters, 0 otherwise.
 */
static int __maybe_unused _get_power(unsigned long *mW, unsigned long *kHz,
				     struct device *dev)
{
	struct dev_pm_opp *opp;
	struct device_node *np;
	unsigned long mV, Hz;
	u32 cap;
	u64 tmp;
	int ret;

	np = of_node_get(dev->of_node);
	if (!np)
		return -EINVAL;

	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
	of_node_put(np);
	if (ret)
		return -EINVAL;

	Hz = *kHz * 1000;
	opp = dev_pm_opp_find_freq_ceil(dev, &Hz);
	if (IS_ERR(opp))
		return -EINVAL;

	mV = dev_pm_opp_get_voltage(opp) / 1000;
	dev_pm_opp_put(opp);
	if (!mV)
		return -EINVAL;

	tmp = (u64)cap * mV * mV * (Hz / 1000000);
	do_div(tmp, 1000000000);

	*mW = (unsigned long)tmp;
	*kHz = Hz / 1000;

	return 0;
}
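
/*
 * Worked example for the estimate computed by _get_power() above (illustrative
 * numbers, not taken from this file): with dynamic-power-coefficient
 * cap = 105 (uW/MHz/V^2), an OPP at 1 GHz (Hz / 1000000 = 1000) and an OPP
 * voltage of 1100000 uV (mV = 1100):
 *
 *	tmp = 105 * 1100 * 1100 * 1000 = 127050000000
 *	*mW = 127050000000 / 1000000000 = 127
 *
 * i.e. roughly 127 mW, which matches P = C * V^2 * f =
 * 105e-6 mW/MHz/V^2 * (1.1 V)^2 * 1000 MHz.
 */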

/**
 * dev_pm_opp_of_register_em() - Attempt to register an Energy Model
 * @dev		: Device for which an Energy Model has to be registered
 * @cpus	: CPUs for which an Energy Model has to be registered. For
 *		other types of devices it should be set to NULL.
 *
 * This checks whether the "dynamic-power-coefficient" devicetree property has
 * been specified, and tries to register an Energy Model with it if it has.
 * Having this property means the voltages are known for OPPs and the EM
 * might be calculated.
 */
int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
{
	struct em_data_callback em_cb = EM_DATA_CB(_get_power);
	struct device_node *np;
	int ret, nr_opp;
	u32 cap;

	if (IS_ERR_OR_NULL(dev)) {
		ret = -EINVAL;
		goto failed;
	}

	nr_opp = dev_pm_opp_get_opp_count(dev);
	if (nr_opp <= 0) {
		ret = -EINVAL;
		goto failed;
	}

	np = of_node_get(dev->of_node);
	if (!np) {
		ret = -EINVAL;
		goto failed;
	}

	/*
	 * Register an EM only if the 'dynamic-power-coefficient' property is
	 * set in devicetree. It is assumed the voltage values are known if that
	 * property is set since it is useless otherwise. If voltages are not
	 * known, just let the EM registration fail with an error to alert the
	 * user about the inconsistent configuration.
	 */
	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
	of_node_put(np);
	if (ret || !cap) {
		dev_dbg(dev, "Couldn't find proper 'dynamic-power-coefficient' in DT\n");
		ret = -EINVAL;
		goto failed;
	}

	ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus, true);
	if (ret)
		goto failed;

	return 0;

failed:
	dev_dbg(dev, "Couldn't register Energy Model %d\n", ret);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);
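
/*
 * Illustrative usage sketch (an assumption, not part of the original file): a
 * cpufreq driver usually registers the Energy Model once the OPP tables for
 * the policy CPUs have been added.
 *
 *	ret = dev_pm_opp_of_cpumask_add_table(policy->cpus);
 *	if (ret)
 *		return ret;
 *
 *	dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
 *
 * A non-CPU device (e.g. a GPU) would instead pass NULL for @cpus.
 */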