// SPDX-License-Identifier: GPL-2.0
/*
 * cdev.c - Character device component for Mostcore
 *
 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/poll.h>
#include <linux/kfifo.h>
#include <linux/uaccess.h>
#include <linux/idr.h>
#include <linux/most.h>

#define CHRDEV_REGION_SIZE 50

static struct cdev_component {
	dev_t devno;
	struct ida minor_id;
	unsigned int major;
	struct class *class;
	struct most_component cc;
} comp;

struct comp_channel {
	wait_queue_head_t wq;
	spinlock_t unlink;	/* synchronization lock to unlink channels */
	struct cdev cdev;
	struct device *dev;
	struct mutex io_mutex;
	struct most_interface *iface;
	struct most_channel_config *cfg;
	unsigned int channel_id;
	dev_t devno;
	size_t mbo_offs;
	DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
	int access_ref;
	struct list_head list;
};

#define to_channel(d) container_of(d, struct comp_channel, cdev)
static struct list_head channel_list;
static spinlock_t ch_list_lock;

static inline bool ch_has_mbo(struct comp_channel *c)
{
	return channel_has_mbo(c->iface, c->channel_id, &comp.cc) > 0;
}

static inline struct mbo *ch_get_mbo(struct comp_channel *c, struct mbo **mbo)
{
	if (!kfifo_peek(&c->fifo, mbo)) {
		*mbo = most_get_mbo(c->iface, c->channel_id, &comp.cc);
		if (*mbo)
			kfifo_in(&c->fifo, mbo, 1);
	}
	return *mbo;
}
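
/*
 * Note: ch_get_mbo() keeps the MBO it fetched at the head of the channel
 * fifo, so a subsequent comp_write() call keeps filling the same buffer
 * until it is submitted and removed from the fifo with kfifo_skip().
 */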

static struct comp_channel *get_channel(struct most_interface *iface, int id)
{
	struct comp_channel *c, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ch_list_lock, flags);
	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		if ((c->iface == iface) && (c->channel_id == id)) {
			spin_unlock_irqrestore(&ch_list_lock, flags);
			return c;
		}
	}
	spin_unlock_irqrestore(&ch_list_lock, flags);
	return NULL;
}

static void stop_channel(struct comp_channel *c)
{
	struct mbo *mbo;

	while (kfifo_out((struct kfifo *)&c->fifo, &mbo, 1))
		most_put_mbo(mbo);
	most_stop_channel(c->iface, c->channel_id, &comp.cc);
}

static void destroy_cdev(struct comp_channel *c)
{
	unsigned long flags;

	device_destroy(comp.class, c->devno);
	cdev_del(&c->cdev);
	spin_lock_irqsave(&ch_list_lock, flags);
	list_del(&c->list);
	spin_unlock_irqrestore(&ch_list_lock, flags);
}

static void destroy_channel(struct comp_channel *c)
{
	ida_simple_remove(&comp.minor_id, MINOR(c->devno));
	kfifo_free(&c->fifo);
	kfree(c);
}
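
/*
 * Teardown happens in two stages: destroy_cdev() removes the device node and
 * unlinks the channel from channel_list, while destroy_channel() frees the
 * fifo, the minor number and the channel object itself.  The second stage is
 * deferred to comp_close() when a process still holds the device node open
 * at disconnect time.
 */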

/**
 * comp_open - implements the syscall to open the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stores the channel pointer in the private data field of
 * the file structure and activates the channel within the core.
 */
static int comp_open(struct inode *inode, struct file *filp)
{
	struct comp_channel *c;
	int ret;

	c = to_channel(inode->i_cdev);
	filp->private_data = c;

	if (((c->cfg->direction == MOST_CH_RX) &&
	     ((filp->f_flags & O_ACCMODE) != O_RDONLY)) ||
	    ((c->cfg->direction == MOST_CH_TX) &&
	     ((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
		return -EACCES;
	}

	mutex_lock(&c->io_mutex);
	if (!c->dev) {
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}

	if (c->access_ref) {
		mutex_unlock(&c->io_mutex);
		return -EBUSY;
	}

	c->mbo_offs = 0;
	ret = most_start_channel(c->iface, c->channel_id, &comp.cc);
	if (!ret)
		c->access_ref = 1;
	mutex_unlock(&c->io_mutex);
	return ret;
}
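
/*
 * Userspace usage sketch (illustrative only, not part of the driver): the
 * access mode must match the channel direction and only one open reference
 * is allowed at a time, e.g. for an rx channel
 *
 *	fd = open("/dev/<channel-name>", O_RDONLY);
 *	n = read(fd, buf, sizeof(buf));
 *
 * A mismatching access mode returns -EACCES, a second concurrent open()
 * returns -EBUSY.
 */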

/**
 * comp_close - implements the syscall to close the device
 * @inode: inode pointer
 * @filp: file pointer
 *
 * This stops the channel within the core.
 */
static int comp_close(struct inode *inode, struct file *filp)
{
	struct comp_channel *c = to_channel(inode->i_cdev);

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->access_ref = 0;
	spin_unlock(&c->unlink);
	if (c->dev) {
		stop_channel(c);
		mutex_unlock(&c->io_mutex);
	} else {
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}

/**
 * comp_write - implements the syscall to write to the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to write
 * @offset: offset from where to start writing
 */
static ssize_t comp_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *offset)
{
	int ret;
	size_t to_copy, left;
	struct mbo *mbo = NULL;
	struct comp_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !ch_get_mbo(c, &mbo)) {
		mutex_unlock(&c->io_mutex);
		if ((filp->f_flags & O_NONBLOCK))
			return -EAGAIN;
		if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	if (unlikely(!c->dev)) {
		ret = -ENODEV;
		goto unlock;
	}

	to_copy = min(count, c->cfg->buffer_size - c->mbo_offs);
	left = copy_from_user(mbo->virt_address + c->mbo_offs, buf, to_copy);
	if (left == to_copy) {
		ret = -EFAULT;
		goto unlock;
	}

	c->mbo_offs += to_copy - left;
	if (c->mbo_offs >= c->cfg->buffer_size ||
	    c->cfg->data_type == MOST_CH_CONTROL ||
	    c->cfg->data_type == MOST_CH_ASYNC) {
		kfifo_skip(&c->fifo);
		mbo->buffer_length = c->mbo_offs;
		c->mbo_offs = 0;
		most_submit_mbo(mbo);
	}
	ret = to_copy - left;
unlock:
	mutex_unlock(&c->io_mutex);
	return ret;
}
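
/*
 * Note: for channel types other than control and async, a short write only
 * fills part of the current MBO; mbo_offs keeps the fill level across
 * write() calls and the buffer is submitted once buffer_size is reached.
 * Control and async channels submit on every write.  io_mutex is dropped
 * around the wait so that close and disconnect, which take the same mutex,
 * are not blocked.
 */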

/**
 * comp_read - implements the syscall to read from the device
 * @filp: file pointer
 * @buf: pointer to user buffer
 * @count: number of bytes to read
 * @offset: offset from where to start reading
 */
static ssize_t
comp_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
{
	size_t to_copy, not_copied, copied;
	struct mbo *mbo = NULL;
	struct comp_channel *c = filp->private_data;

	mutex_lock(&c->io_mutex);
	while (c->dev && !kfifo_peek(&c->fifo, &mbo)) {
		mutex_unlock(&c->io_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		if (wait_event_interruptible(c->wq,
					     (!kfifo_is_empty(&c->fifo) ||
					      (!c->dev))))
			return -ERESTARTSYS;
		mutex_lock(&c->io_mutex);
	}

	/* make sure we don't submit to gone devices */
	if (unlikely(!c->dev)) {
		mutex_unlock(&c->io_mutex);
		return -ENODEV;
	}

	to_copy = min_t(size_t,
			count,
			mbo->processed_length - c->mbo_offs);

	not_copied = copy_to_user(buf,
				  mbo->virt_address + c->mbo_offs,
				  to_copy);

	copied = to_copy - not_copied;

	c->mbo_offs += copied;
	if (c->mbo_offs >= mbo->processed_length) {
		kfifo_skip(&c->fifo);
		most_put_mbo(mbo);
		c->mbo_offs = 0;
	}
	mutex_unlock(&c->io_mutex);
	return copied;
}
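
/*
 * Note: a buffer larger than the read() count stays at the head of the fifo;
 * mbo_offs remembers how far it has been consumed, and the MBO is only
 * returned to the core with most_put_mbo() once all processed_length bytes
 * have been handed to userspace.
 */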

static __poll_t comp_poll(struct file *filp, poll_table *wait)
{
	struct comp_channel *c = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &c->wq, wait);

	mutex_lock(&c->io_mutex);
	if (c->cfg->direction == MOST_CH_RX) {
		if (!c->dev || !kfifo_is_empty(&c->fifo))
			mask |= EPOLLIN | EPOLLRDNORM;
	} else {
		if (!c->dev || !kfifo_is_empty(&c->fifo) || ch_has_mbo(c))
			mask |= EPOLLOUT | EPOLLWRNORM;
	}
	mutex_unlock(&c->io_mutex);
	return mask;
}
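
/*
 * A disconnected channel (!c->dev) is reported as ready so that pollers wake
 * up and the subsequent read() or write() returns -ENODEV instead of
 * blocking forever.
 */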

/**
 * Initialization of struct file_operations
 */
static const struct file_operations channel_fops = {
	.owner = THIS_MODULE,
	.read = comp_read,
	.write = comp_write,
	.open = comp_open,
	.release = comp_close,
	.poll = comp_poll,
};

/**
 * comp_disconnect_channel - disconnect a channel
 * @iface: pointer to interface instance
 * @channel_id: channel index
 *
 * This frees allocated memory and removes the cdev that represents this
 * channel in user space.
 */
static int comp_disconnect_channel(struct most_interface *iface, int channel_id)
{
	struct comp_channel *c;

	c = get_channel(iface, channel_id);
	if (!c)
		return -EINVAL;

	mutex_lock(&c->io_mutex);
	spin_lock(&c->unlink);
	c->dev = NULL;
	spin_unlock(&c->unlink);
	destroy_cdev(c);
	if (c->access_ref) {
		stop_channel(c);
		wake_up_interruptible(&c->wq);
		mutex_unlock(&c->io_mutex);
	} else {
		mutex_unlock(&c->io_mutex);
		destroy_channel(c);
	}
	return 0;
}

/**
 * comp_rx_completion - completion handler for rx channels
 * @mbo: pointer to buffer object that has completed
 *
 * This searches for the channel linked to this MBO and stores the MBO in the
 * channel's local fifo buffer.
 */
static int comp_rx_completion(struct mbo *mbo)
{
	struct comp_channel *c;

	if (!mbo)
		return -EINVAL;

	c = get_channel(mbo->ifp, mbo->hdm_channel_id);
	if (!c)
		return -EINVAL;

	spin_lock(&c->unlink);
	if (!c->access_ref || !c->dev) {
		spin_unlock(&c->unlink);
		return -ENODEV;
	}
	kfifo_in(&c->fifo, &mbo, 1);
	spin_unlock(&c->unlink);
#ifdef DEBUG_MESG
	if (kfifo_is_full(&c->fifo))
		dev_warn(c->dev, "Fifo is full\n");
#endif
	wake_up_interruptible(&c->wq);
	return 0;
}
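
/*
 * comp_rx_completion() only queues the completed MBO and wakes any reader
 * blocked in comp_read() or poll(); the data is copied to userspace and the
 * MBO returned to the core later, from comp_read().  The unlink lock guards
 * against an MBO being queued after the channel has been closed or
 * disconnected.
 */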

/**
 * comp_tx_completion - completion handler for tx channels
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 *
 * This wakes sleeping processes in the wait-queue.
 */
static int comp_tx_completion(struct most_interface *iface, int channel_id)
{
	struct comp_channel *c;

	c = get_channel(iface, channel_id);
	if (!c)
		return -EINVAL;

	if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
		dev_warn(c->dev, "Channel ID out of range\n");
		return -EINVAL;
	}

	wake_up_interruptible(&c->wq);
	return 0;
}

/**
 * comp_probe - probe function of the driver module
 * @iface: pointer to interface instance
 * @channel_id: channel index/ID
 * @cfg: pointer to actual channel configuration
 * @name: name of the device to be created
 * @args: component-specific arguments (not used here)
 *
 * This allocates a channel object and creates the device node in /dev
 *
 * Returns 0 on success or error code otherwise.
 */
static int comp_probe(struct most_interface *iface, int channel_id,
		      struct most_channel_config *cfg, char *name, char *args)
{
	struct comp_channel *c;
	unsigned long cl_flags;
	int retval;
	int current_minor;

	if (!cfg || !name)
		return -EINVAL;

	c = get_channel(iface, channel_id);
	if (c)
		return -EEXIST;

	current_minor = ida_simple_get(&comp.minor_id, 0, 0, GFP_KERNEL);
	if (current_minor < 0)
		return current_minor;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		retval = -ENOMEM;
		goto err_remove_ida;
	}

	c->devno = MKDEV(comp.major, current_minor);
	cdev_init(&c->cdev, &channel_fops);
	c->cdev.owner = THIS_MODULE;
	retval = cdev_add(&c->cdev, c->devno, 1);
	if (retval < 0)
		goto err_free_c;
	c->iface = iface;
	c->cfg = cfg;
	c->channel_id = channel_id;
	c->access_ref = 0;
	spin_lock_init(&c->unlink);
	INIT_KFIFO(c->fifo);
	retval = kfifo_alloc(&c->fifo, cfg->num_buffers, GFP_KERNEL);
	if (retval)
		goto err_del_cdev_and_free_channel;
	init_waitqueue_head(&c->wq);
	mutex_init(&c->io_mutex);
	spin_lock_irqsave(&ch_list_lock, cl_flags);
	list_add_tail(&c->list, &channel_list);
	spin_unlock_irqrestore(&ch_list_lock, cl_flags);
	c->dev = device_create(comp.class, NULL, c->devno, NULL, "%s", name);

	if (IS_ERR(c->dev)) {
		retval = PTR_ERR(c->dev);
		goto err_free_kfifo_and_del_list;
	}
	kobject_uevent(&c->dev->kobj, KOBJ_ADD);
	return 0;

err_free_kfifo_and_del_list:
	kfifo_free(&c->fifo);
	list_del(&c->list);
err_del_cdev_and_free_channel:
	cdev_del(&c->cdev);
err_free_c:
	kfree(c);
err_remove_ida:
	ida_simple_remove(&comp.minor_id, current_minor);
	return retval;
}
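
/*
 * comp_probe() is invoked by the core when a channel is linked to the "cdev"
 * component, typically through the mostcore configfs interface registered in
 * mod_init().  The error path unwinds in reverse order of setup: fifo and
 * list entry, cdev, channel object, minor number.
 */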

static struct cdev_component comp = {
	.cc = {
		.mod = THIS_MODULE,
		.name = "cdev",
		.probe_channel = comp_probe,
		.disconnect_channel = comp_disconnect_channel,
		.rx_completion = comp_rx_completion,
		.tx_completion = comp_tx_completion,
	},
};

static int __init mod_init(void)
{
	int err;

	comp.class = class_create(THIS_MODULE, "most_cdev");
	if (IS_ERR(comp.class))
		return PTR_ERR(comp.class);

	INIT_LIST_HEAD(&channel_list);
	spin_lock_init(&ch_list_lock);
	ida_init(&comp.minor_id);

	err = alloc_chrdev_region(&comp.devno, 0, CHRDEV_REGION_SIZE, "cdev");
	if (err < 0)
		goto dest_ida;
	comp.major = MAJOR(comp.devno);
	err = most_register_component(&comp.cc);
	if (err)
		goto free_cdev;
	err = most_register_configfs_subsys(&comp.cc);
	if (err)
		goto deregister_comp;
	return 0;

deregister_comp:
	most_deregister_component(&comp.cc);
free_cdev:
	unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
dest_ida:
	ida_destroy(&comp.minor_id);
	class_destroy(comp.class);
	return err;
}
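
/*
 * Module init reserves CHRDEV_REGION_SIZE minor numbers up front and then
 * registers the component and its configfs subsystem with the core; each
 * error label rolls back exactly the steps that already succeeded, ending
 * with the class created at the top of the function.
 */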

static void __exit mod_exit(void)
{
	struct comp_channel *c, *tmp;

	most_deregister_configfs_subsys(&comp.cc);
	most_deregister_component(&comp.cc);

	list_for_each_entry_safe(c, tmp, &channel_list, list) {
		destroy_cdev(c);
		destroy_channel(c);
	}
	unregister_chrdev_region(comp.devno, CHRDEV_REGION_SIZE);
	ida_destroy(&comp.minor_id);
	class_destroy(comp.class);
}

module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("character device component for mostcore");