// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (maps == NULL)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name != NULL) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (mapi == NULL) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);
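
/*
 * Illustrative sketch, not part of the original file: a provider (ADC)
 * driver could register consumer mappings as below. The table and names
 * ("channel_0", "some-consumer", "vbat") are hypothetical; IIO_MAP() comes
 * from <linux/iio/machine.h>, and the table must be NULL-terminated since
 * iio_map_array_register() walks it until consumer_dev_name is NULL.
 *
 *	static struct iio_map adc_maps[] = {
 *		IIO_MAP("channel_0", "some-consumer", "vbat"),
 *		{ },
 *	};
 *
 *	ret = iio_map_array_register(indio_dev, adc_maps);
 */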

/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	mutex_lock(&iio_map_list_lock);
	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}

	return chan;
}

#ifdef CONFIG_OF

static int iio_dev_node_match(struct device *dev, const void *data)
{
	return dev->of_node == data && dev->type == &iio_device_type;
}

/**
 * __of_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for the most common case
 * of 1:1 mapped channels in IIO chips. It performs only one sanity check:
 * that the IIO index is less than num_channels (as specified in the
 * iio_dev).
 */
static int __of_iio_simple_xlate(struct iio_dev *indio_dev,
				 const struct of_phandle_args *iiospec)
{
	if (!iiospec->args_count)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %u\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}
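
/*
 * Illustrative device tree fragment (an assumption, not taken from this
 * file): with "#io-channel-cells = <1>", the single cell becomes args[0]
 * above and selects the provider's channel index.
 *
 *	adc: adc@48 {
 *		#io-channel-cells = <1>;
 *	};
 *
 *	consumer {
 *		io-channels = <&adc 1>;
 *		io-channel-names = "vbat";
 *	};
 */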

static int __of_iio_channel_get(struct iio_channel *channel,
				struct device_node *np, int index)
{
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;
	struct of_phandle_args iiospec;

	err = of_parse_phandle_with_args(np, "io-channels",
					 "#io-channel-cells",
					 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device(&iio_bus_type, NULL, iiospec.np,
			       iio_dev_node_match);
	of_node_put(iiospec.np);
	if (idev == NULL)
		return -EPROBE_DEFER;

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->of_xlate)
		index = indio_dev->info->of_xlate(indio_dev, &iiospec);
	else
		index = __of_iio_simple_xlate(indio_dev, &iiospec);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}

static struct iio_channel *of_iio_channel_get(struct device_node *np, int index)
{
	struct iio_channel *channel;
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL)
		return ERR_PTR(-ENOMEM);

	err = __of_iio_channel_get(channel, np, index);
	if (err)
		goto err_free_channel;

	return channel;

err_free_channel:
	kfree(channel);
	return ERR_PTR(err);
}

static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
						      const char *name)
{
	struct iio_channel *chan = NULL;

	/* Walk up the tree of devices looking for a matching iio channel */
	while (np) {
		int index = 0;

		/*
		 * For named iio channels, first look up the name in the
		 * "io-channel-names" property. If it cannot be found, the
		 * index will be an error code, and of_iio_channel_get()
		 * will fail.
		 */
		if (name)
			index = of_property_match_string(np, "io-channel-names",
							 name);
		chan = of_iio_channel_get(np, index);
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			break;
		else if (name && index >= 0) {
			pr_err("ERROR: could not get IIO channel %pOF:%s(%i)\n",
			       np, name, index);
			return NULL;
		}

		/*
		 * No matching IIO channel found on this node.
		 * If the parent node has an "io-channel-ranges" property,
		 * then we can try one of its channels.
		 */
		np = np->parent;
		if (np && !of_get_property(np, "io-channel-ranges", NULL))
			return NULL;
	}

	return chan;
}

static struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = of_parse_phandle_with_args(dev->of_node,
						 "io-channels",
						 "#io-channel-cells",
						 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)	/* no error, return NULL to search map table */
		return NULL;

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL)
		return ERR_PTR(-ENOMEM);

	/* Search for OF matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __of_iio_channel_get(&chans[mapind], dev->of_node,
					   mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}

#else /* CONFIG_OF */

static inline struct iio_channel *
of_iio_channel_get_by_name(struct device_node *np, const char *name)
{
	return NULL;
}

static inline struct iio_channel *of_iio_channel_get_all(struct device *dev)
{
	return NULL;
}

#endif /* CONFIG_OF */

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (name == NULL && channel_name == NULL)
		return ERR_PTR(-ENODEV);

	/* first find the matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (c == NULL)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (channel == NULL) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (channel->channel == NULL) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}

struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = of_iio_channel_get_by_name(dev->of_node,
						     channel_name);
		if (channel != NULL)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);

void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

static void devm_iio_channel_free(struct device *dev, void *res)
{
	struct iio_channel *channel = *(struct iio_channel **)res;

	iio_channel_release(channel);
}

struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel **ptr, *channel;

	ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel)) {
		devres_free(ptr);
		return channel;
	}

	*ptr = channel;
	devres_add(dev, ptr);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);
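
/*
 * Illustrative consumer-side sketch (an assumption, not from this file):
 * a probe() routine can take a named channel with automatic cleanup on
 * driver detach; "vbat" is a hypothetical "io-channel-names" entry.
 *
 *	struct iio_channel *chan;
 *
 *	chan = devm_iio_channel_get(&pdev->dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */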

struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (dev == NULL)
		return ERR_PTR(-EINVAL);

	chans = of_iio_channel_get_all(dev);
	if (chans)
		return chans;

	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (chans == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (chans[mapind].channel == NULL) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	/* only the first mapind entries actually took a device reference */
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);

void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static void devm_iio_channel_free_all(struct device *dev, void *res)
{
	struct iio_channel *channels = *(struct iio_channel **)res;

	iio_channel_release_all(channels);
}

struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel **ptr, *channels;

	ptr = devres_alloc(devm_iio_channel_free_all, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels)) {
		devres_free(ptr);
		return channels;
	}

	*ptr = channels;
	devres_add(dev, ptr);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (val2 == NULL)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (chan->indio_dev->info->read_raw_multi) {
		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
					chan->channel, INDIO_MAX_RAW_ELEMENTS,
					vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else {
		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					chan->channel, val, val2, info);
	}

	return ret;
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);

int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
						 int raw, int *processed,
						 unsigned int scale)
{
	int scale_type, scale_val, scale_val2;
	int offset_type, offset_val, offset_val2;
	s64 raw64 = raw;

	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
				       IIO_CHAN_INFO_OFFSET);
	if (offset_type >= 0) {
		switch (offset_type) {
		case IIO_VAL_INT:
			break;
		case IIO_VAL_INT_PLUS_MICRO:
		case IIO_VAL_INT_PLUS_NANO:
			/*
			 * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
			 * implicitly truncate the offset to its integer form.
			 */
			break;
		case IIO_VAL_FRACTIONAL:
			offset_val /= offset_val2;
			break;
		case IIO_VAL_FRACTIONAL_LOG2:
			offset_val >>= offset_val2;
			break;
		default:
			return -EINVAL;
		}

		raw64 += offset_val;
	}

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * If no channel scaling is available apply consumer scale to
		 * raw value and return.
		 */
		*processed = raw * scale;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
				 int *processed, unsigned int scale)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
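
/*
 * Worked example with made-up numbers: for a channel reporting an
 * IIO_VAL_FRACTIONAL scale of 2500/4096 (mV per LSB) and no offset, a raw
 * reading of 1024 with a consumer scale of 1 is processed above as
 *
 *	processed = 1024 * 2500 * 1 / 4096 = 625 (mV)
 *
 * via div_s64() in the IIO_VAL_FRACTIONAL branch.
 */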

int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);

int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val, 1);
	}

err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);
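
/*
 * Illustrative read sketch (an assumption, not from this file): given a
 * channel obtained earlier, a consumer usually prefers the processed value
 * and lets the core fall back to raw-plus-scaling when needed.
 *
 *	int uval, ret;
 *
 *	ret = iio_read_channel_processed(chan, &uval);
 *	if (ret < 0)
 *		return ret;
 */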

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
						 vals, type, length, info);
}

int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);

int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					       IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
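
/*
 * Note on the layout assumed below (per the IIO read_avail convention):
 * IIO_AVAIL_RANGE returns {min, step, max}, so the maximum is vals[2] for
 * IIO_VAL_INT, or the vals[4]/vals[5] pair for two-part value types;
 * IIO_AVAIL_LIST returns "length" discrete values to be scanned.
 */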

static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	int unused;
	const int *vals;
	int length;
	int ret;

	if (!val2)
		val2 = &unused;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[--length];
			while (length) {
				if (vals[--length] > *val)
					*val = vals[length];
			}
			break;
		default:
			/* FIXME: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return ret;
	}
}

int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	int ret;
	int type;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	int ret = 0;

	/* Need to verify underlying driver has not gone away */
	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);

static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}

int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	int ret;

	mutex_lock(&chan->indio_dev->info_exist_lock);
	if (chan->indio_dev->info == NULL) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&chan->indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);

int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
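
/*
 * Illustrative write sketch (an assumption, not from this file): a consumer
 * driving a DAC channel could set a raw code like this; "dac_chan" is a
 * hypothetical channel obtained via iio_channel_get().
 *
 *	ret = iio_write_channel_raw(dac_chan, 2048);
 *	if (ret < 0)
 *		dev_warn(dev, "DAC write failed: %d\n", ret);
 */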

unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);

static const struct iio_chan_spec_ext_info *iio_lookup_ext_info(
						const struct iio_channel *chan,
						const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}

ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);

ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);