// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}

static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && avail < to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf,
						to_flush - avail);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}

/**
 * iio_buffer_read_outer() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative values corresponding to error codes or ret != 0
 *	   for ending the reading activity
 **/
ssize_t iio_buffer_read_outer(struct file *filp, char __user *buf,
			      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	size_t datum_size;
	size_t to_wait;
	int ret = 0;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

	add_wait_queue(&rb->pollq, &wait);
	do {
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->read(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
__poll_t iio_buffer_poll(struct file *filp,
			 struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info || rb == NULL)
		return 0;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;

	if (!buffer)
		return;

	wake_up(&buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	if (!buffer->watermark)
		buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);

/**
 * iio_buffer_set_attrs - Set buffer specific attributes
 * @buffer: The buffer for which we are setting attributes
 * @attrs: Pointer to a null terminated list of pointers to attributes
 */
void iio_buffer_set_attrs(struct iio_buffer *buffer,
			  const struct attribute **attrs)
{
	buffer->attrs = attrs;
}
EXPORT_SYMBOL_GPL(iio_buffer_set_attrs);
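
/*
 * Illustrative sketch, not part of the original file: a driver can expose
 * extra, buffer-specific sysfs attributes by handing a NULL-terminated array
 * to iio_buffer_set_attrs() before registering the device. The "example_*"
 * names below are hypothetical; real drivers typically export hardware FIFO
 * attributes this way.
 */
static IIO_CONST_ATTR(example_hwfifo_watermark_min, "1");

static const struct attribute *example_buffer_attrs[] = {
	&iio_const_attr_example_hwfifo_watermark_min.dev_attr.attr,
	NULL,
};

static void __maybe_unused example_set_buffer_attrs(struct iio_dev *indio_dev)
{
	iio_buffer_set_attrs(indio_dev->buffer, example_buffer_attrs);
}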

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.repeat,
			       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
			       iio_endian_prefix[type],
			       this_attr->c->scan_type.sign,
			       this_attr->c->scan_type.realbits,
			       this_attr->c->scan_type.storagebits,
			       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask,
						bool strict)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
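
/*
 * Illustrative sketch, not part of the original file: iio_scan_mask_match()
 * walks indio_dev->available_scan_masks as a zero-terminated array of
 * bitmaps, each BITS_TO_LONGS(masklength) longs wide. For a device with
 * masklength <= BITS_PER_LONG, a driver could describe "channel 0 alone, or
 * channels 0..2 together" as below ("example_avail_masks" is a hypothetical
 * name).
 */
static const unsigned long example_avail_masks[] __maybe_unused = {
	BIT(0),
	BIT(0) | BIT(1) | BIT(2),
	0,
};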

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffers request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	bitmap_free(trialmask);

	return 0;

err_invalid_mask:
	bitmap_free(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static int iio_scan_mask_query(struct iio_dev *indio_dev,
			       struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_buffer *buffer,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}

static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", iio_buffer_is_active(buffer));
}

static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
					     unsigned int scan_index)
{
	const struct iio_chan_spec *ch;
	unsigned int bytes;

	ch = iio_find_channel_from_si(indio_dev, scan_index);
	bytes = ch->scan_type.storagebits / 8;
	if (ch->scan_type.repeat > 1)
		bytes *= ch->scan_type.repeat;
	return bytes;
}

static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
	return iio_storage_bytes_for_si(indio_dev,
					indio_dev->scan_index_timestamp);
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	unsigned bytes = 0;
	int length, i, largest = 0;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		length = iio_storage_bytes_for_si(indio_dev, i);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	if (timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	bytes = ALIGN(bytes, largest);
	return bytes;
}
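
/*
 * Worked example (added for clarity, not in the original file): with one
 * 16-bit channel, one 32-bit channel and an enabled 64-bit timestamp,
 * iio_compute_scan_bytes() proceeds as
 *   0  -> ALIGN(0, 2) + 2 = 2	(16-bit sample)
 *   2  -> ALIGN(2, 4) + 4 = 8	(32-bit sample, padded to offset 4)
 *   8  -> ALIGN(8, 8) + 8 = 16	(timestamp)
 * and the final ALIGN(16, 8) keeps the total at 16 bytes per scan.
 */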

static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
				 &iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}

static int iio_buffer_enable(struct iio_buffer *buffer,
			     struct iio_dev *indio_dev)
{
	if (!buffer->access->enable)
		return 0;
	return buffer->access->enable(buffer, indio_dev);
}

static int iio_buffer_disable(struct iio_buffer *buffer,
			      struct iio_dev *indio_dev)
{
	if (!buffer->access->disable)
		return 0;
	return buffer->access->disable(buffer, indio_dev);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int iio_buffer_request_update(struct iio_dev *indio_dev,
				     struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
			       const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		bitmap_free(mask);
}

struct iio_device_config {
	unsigned int mode;
	unsigned int watermark;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};

static int iio_verify_update(struct iio_dev *indio_dev,
			     struct iio_buffer *insert_buffer,
			     struct iio_buffer *remove_buffer,
			     struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	if (insert_buffer &&
	    bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
		dev_dbg(&indio_dev->dev,
			"At least one scan element must be enabled first\n");
		return -EINVAL;
	}

	memset(config, 0, sizeof(*config));
	config->watermark = ~0;

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
	    list_is_singular(&iio_dev_opaque->buffer_list))
		return 0;

	modes = indio_dev->modes;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
		config->watermark = min(config->watermark, buffer->watermark);
	}

	if (insert_buffer) {
		modes &= insert_buffer->access->modes;
		config->watermark = min(config->watermark,
					insert_buffer->watermark);
	}

	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (compound_mask == NULL)
		return -ENOMEM;

	scan_timestamp = false;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
						indio_dev->masklength,
						compound_mask,
						strict_scanmask);
		bitmap_free(compound_mask);
		if (scan_mask == NULL)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
						    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

static int iio_buffer_add_demux(struct iio_buffer *buffer,
				struct iio_demux_table **p, unsigned int in_loc,
				unsigned int out_loc,
				unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
	    (*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			length = iio_storage_bytes_for_si(indio_dev, in_ind);
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		length = iio_storage_bytes_for_si(indio_dev, in_ind);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
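
/*
 * Worked example (added for clarity, not in the original file): with an
 * active scan mask covering channels {0, 1, 2} of 2 bytes each plus an
 * 8-byte timestamp, and a buffer that enabled only {0, 2} plus the
 * timestamp, the source scan is laid out as ch0@0, ch1@2, ch2@4, ts@8.
 * iio_buffer_update_demux() then builds three entries,
 *   {from 0, to 0, length 2}, {from 4, to 2, length 2}, {from 8, to 8, length 8},
 * and allocates a 16-byte demux_bounce holding ch0@0, ch2@2, ts@8.
 */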

static int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}

static int iio_enable_buffers(struct iio_dev *indio_dev,
			      struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;
	indio_dev->currentmode = config->mode;

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev,
			config->watermark);

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_enable(buffer, indio_dev);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		ret = iio_trigger_attach_poll_func(indio_dev->trig,
						   indio_dev->pollfunc);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: postenable failed (%d)\n", ret);
			goto err_detach_pollfunc;
		}
	}

	return 0;

err_detach_pollfunc:
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}
err_disable_buffers:
	list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
					     buffer_list)
		iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
	indio_dev->active_scan_mask = NULL;

	return ret;
}

static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in an
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret2 = iio_buffer_disable(buffer, indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;
	indio_dev->currentmode = INDIO_DIRECT_MODE;

	return ret;
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
				&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state. With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
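
/*
 * Illustrative sketch, not part of the original file: an in-kernel consumer
 * that has allocated its own struct iio_buffer could hook it up to a live
 * device as below; passing the buffer as remove_buffer instead detaches it.
 * "example_attach_consumer_buffer" is a hypothetical name.
 */
static int __maybe_unused
example_attach_consumer_buffer(struct iio_dev *indio_dev,
			       struct iio_buffer *buffer)
{
	return iio_update_buffers(indio_dev, buffer, NULL);
}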

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}

static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev, buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev, NULL, buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%u\n", buffer->watermark);
}

static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_dma_show_data_available(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%zu\n", iio_buffer_data_available(buffer));
}

static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
		   iio_buffer_show_watermark, iio_buffer_store_watermark);
static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
	S_IRUGO, iio_buffer_show_watermark, NULL);
static DEVICE_ATTR(data_available, S_IRUGO,
		   iio_dma_show_data_available, NULL);

static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
	&dev_attr_data_available.attr,
};

static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
					     struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	int ret, i, attrn, attrcount;
	const struct iio_chan_spec *channels;

	attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[attrcount] != NULL)
			attrcount++;
	}

	attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
		       sizeof(struct attribute *), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
		attr[2] = &dev_attr_watermark_ro.attr;

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * attrcount);

	attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;

	buffer->buffer_group.name = "buffer";
	buffer->buffer_group.attrs = attr;

	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

	attrcount = 0;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* new magic */
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
							  GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	attrn = 0;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
	kfree(buffer->buffer_group.attrs);

	return ret;
}

int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;
	const struct iio_chan_spec *channels;
	int i;

	channels = indio_dev->channels;
	if (channels) {
		int ml = indio_dev->masklength;

		for (i = 0; i < indio_dev->num_channels; i++)
			ml = max(ml, channels[i].scan_index + 1);
		indio_dev->masklength = ml;
	}

	if (!buffer)
		return 0;

	return __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev);
}

static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer)
{
	bitmap_free(buffer->scan_mask);
	kfree(buffer->buffer_group.attrs);
	kfree(buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
}

void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;

	if (!buffer)
		return;

	__iio_buffer_free_sysfs_and_mask(buffer);
}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
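
/*
 * Illustrative sketch, not part of the original file: a driver whose hardware
 * can sample only one channel at a time would plug this helper into its
 * buffer setup ops. "example_setup_ops" is a hypothetical name.
 */
static const struct iio_buffer_setup_ops example_setup_ops __maybe_unused = {
	.validate_scan_mask = iio_validate_scan_mask_onehot,
};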

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);
	int ret;

	ret = buffer->access->store_to(buffer, dataout);
	if (ret)
		return ret;

	/*
	 * We can't just test for watermark to decide if we wake the poll queue
	 * because read may request fewer samples than the watermark.
	 */
	wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
	return 0;
}

/**
 * iio_push_to_buffers() - push to a registered buffer.
 * @indio_dev:	iio_dev structure for device.
 * @data:	Full scan.
 */
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
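
/*
 * Illustrative sketch, not part of the original file: a driver's trigger
 * handler would typically assemble one scan into a suitably sized and aligned
 * structure and push it, together with a timestamp, through the
 * iio_push_to_buffers_with_timestamp() helper from <linux/iio/buffer.h>.
 * The "example_*" names below are hypothetical.
 */
struct example_scan {
	__le16 channels[2];
	s64 timestamp __aligned(8);	/* filled in by the helper when enabled */
};

static int __maybe_unused example_push_one_scan(struct iio_dev *indio_dev,
						struct example_scan *scan)
{
	return iio_push_to_buffers_with_timestamp(indio_dev, scan,
						  iio_get_time_ns(indio_dev));
}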

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);

/**
 * iio_device_attach_buffer - Attach a buffer to an IIO device
 * @indio_dev: The device the buffer should be attached to
 * @buffer: The buffer to attach to the device
 *
 * This function attaches a buffer to an IIO device. The buffer stays attached
 * to the device until the device is freed. The function should only be called
 * at most once per device.
 */
void iio_device_attach_buffer(struct iio_dev *indio_dev,
			      struct iio_buffer *buffer)
{
	indio_dev->buffer = iio_buffer_get(buffer);
}
EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
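
/*
 * Illustrative sketch, not part of the original file: in a driver's probe
 * path the buffer would come from a buffer implementation (for instance
 * iio_kfifo_allocate() from <linux/iio/kfifo_buf.h>, which is not included
 * here) and be handed over exactly once. "example_attach_buffer" is a
 * hypothetical name.
 */
static void __maybe_unused example_attach_buffer(struct iio_dev *indio_dev,
						 struct iio_buffer *buffer)
{
	/* the device takes its own reference and keeps it until it is freed */
	iio_device_attach_buffer(indio_dev, buffer);
}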