virqfd.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO generic eventfd code for IRQFD support.
 * Derived from drivers/vfio/pci/vfio_pci_intrs.c
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/vfio.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/slab.h>

#define DRIVER_VERSION	"0.1"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"IRQFD support for VFIO bus drivers"
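
/*
 * For reference, struct virqfd is declared in the VFIO headers rather
 * than in this file.  A sketch reconstructed purely from the fields
 * used below (field order and exact types in the real header may
 * differ):
 *
 *	struct virqfd {
 *		void			*opaque;
 *		struct eventfd_ctx	*eventfd;
 *		int			(*handler)(void *, void *);
 *		void			(*thread)(void *, void *);
 *		void			*data;
 *		struct work_struct	inject;
 *		wait_queue_entry_t	wait;
 *		poll_table		pt;
 *		struct work_struct	shutdown;
 *		struct virqfd		**pvirqfd;
 *	};
 */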
static struct workqueue_struct *vfio_irqfd_cleanup_wq;
static DEFINE_SPINLOCK(virqfd_lock);

static int __init vfio_virqfd_init(void)
{
	vfio_irqfd_cleanup_wq =
		create_singlethread_workqueue("vfio-irqfd-cleanup");
	if (!vfio_irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

static void __exit vfio_virqfd_exit(void)
{
	destroy_workqueue(vfio_irqfd_cleanup_wq);
}

/* Defer release of a virqfd to the dedicated cleanup workqueue. */
static void virqfd_deactivate(struct virqfd *virqfd)
{
	queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
}

static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
	__poll_t flags = key_to_poll(key);

	if (flags & EPOLLIN) {
		/* An event has been signaled, call function */
		if ((!virqfd->handler ||
		     virqfd->handler(virqfd->opaque, virqfd->data)) &&
		    virqfd->thread)
			schedule_work(&virqfd->inject);
	}

	if (flags & EPOLLHUP) {
		unsigned long flags;
		spin_lock_irqsave(&virqfd_lock, flags);

		/*
		 * The eventfd is closing, if the virqfd has not yet been
		 * queued for release, as determined by testing whether the
		 * virqfd pointer to it is still valid, queue it now.  As
		 * with kvm irqfds, we know we won't race against the virqfd
		 * going away because we hold the lock to get here.
		 */
		if (*(virqfd->pvirqfd) == virqfd) {
			*(virqfd->pvirqfd) = NULL;
			virqfd_deactivate(virqfd);
		}

		spin_unlock_irqrestore(&virqfd_lock, flags);
	}

	return 0;
}

/*
 * poll_table callback: vfs_poll() invokes this to hook our wait entry
 * onto the eventfd's waitqueue.
 */
static void virqfd_ptable_queue_proc(struct file *file,
				     wait_queue_head_t *wqh, poll_table *pt)
{
	struct virqfd *virqfd = container_of(pt, struct virqfd, pt);
	add_wait_queue(wqh, &virqfd->wait);
}

static void virqfd_shutdown(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
	u64 cnt;

	eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
	flush_work(&virqfd->inject);
	eventfd_ctx_put(virqfd->eventfd);

	kfree(virqfd);
}

static void virqfd_inject(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, inject);
	if (virqfd->thread)
		virqfd->thread(virqfd->opaque, virqfd->data);
}
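
/*
 * Note on the split above: ->handler runs from the eventfd wakeup
 * callback (atomic context, must not sleep), and a non-zero return
 * defers the remaining work to ->thread, which virqfd_inject() runs
 * from the system workqueue in process context where sleeping is
 * allowed.
 */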
int vfio_virqfd_enable(void *opaque,
		       int (*handler)(void *, void *),
		       void (*thread)(void *, void *),
		       void *data, struct virqfd **pvirqfd, int fd)
{
	struct fd irqfd;
	struct eventfd_ctx *ctx;
	struct virqfd *virqfd;
	int ret = 0;
	__poll_t events;

	virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
	if (!virqfd)
		return -ENOMEM;

	virqfd->pvirqfd = pvirqfd;
	virqfd->opaque = opaque;
	virqfd->handler = handler;
	virqfd->thread = thread;
	virqfd->data = data;

	INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
	INIT_WORK(&virqfd->inject, virqfd_inject);

	irqfd = fdget(fd);
	if (!irqfd.file) {
		ret = -EBADF;
		goto err_fd;
	}

	ctx = eventfd_ctx_fileget(irqfd.file);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto err_ctx;
	}

	virqfd->eventfd = ctx;

	/*
	 * virqfds can be released by closing the eventfd or directly
	 * through ioctl.  These are both done through a workqueue, so
	 * we update the pointer to the virqfd under lock to avoid
	 * pushing multiple jobs to release the same virqfd.
	 */
	spin_lock_irq(&virqfd_lock);

	if (*pvirqfd) {
		spin_unlock_irq(&virqfd_lock);
		ret = -EBUSY;
		goto err_busy;
	}
	*pvirqfd = virqfd;

	spin_unlock_irq(&virqfd_lock);

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&virqfd->wait, virqfd_wakeup);
	init_poll_funcptr(&virqfd->pt, virqfd_ptable_queue_proc);

	events = vfs_poll(irqfd.file, &virqfd->pt);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered and trigger it as if we didn't miss it.
	 */
	if (events & EPOLLIN) {
		if ((!handler || handler(opaque, data)) && thread)
			schedule_work(&virqfd->inject);
	}

	/*
	 * Do not drop the file until the irqfd is fully initialized,
	 * otherwise we might race against the EPOLLHUP.
	 */
	fdput(irqfd);

	return 0;
err_busy:
	eventfd_ctx_put(ctx);
err_ctx:
	fdput(irqfd);
err_fd:
	kfree(virqfd);

	return ret;
}
EXPORT_SYMBOL_GPL(vfio_virqfd_enable);
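
/*
 * Illustrative caller (not part of this file): a bus driver handing a
 * userspace-supplied eventfd to vfio_virqfd_enable().  The names
 * my_device, my_handler, my_thread and unmask_fd below are
 * hypothetical, shown only to sketch the calling convention.
 */
#if 0
static int my_handler(void *opaque, void *data)
{
	struct my_device *mdev = opaque;

	/*
	 * Runs from the eventfd wakeup; must not sleep.  Return
	 * non-zero to have the thread callback scheduled.
	 */
	return 1;
}

static void my_thread(void *opaque, void *data)
{
	struct my_device *mdev = opaque;

	/* Process context; sleeping is fine here. */
}

static int my_enable_virqfd(struct my_device *mdev, int unmask_fd)
{
	/* unmask_fd is an eventfd file descriptor from userspace. */
	return vfio_virqfd_enable(mdev, my_handler, my_thread, NULL,
				  &mdev->virqfd, unmask_fd);
}
#endif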
void vfio_virqfd_disable(struct virqfd **pvirqfd)
{
	unsigned long flags;

	spin_lock_irqsave(&virqfd_lock, flags);

	if (*pvirqfd) {
		virqfd_deactivate(*pvirqfd);
		*pvirqfd = NULL;
	}

	spin_unlock_irqrestore(&virqfd_lock, flags);

	/*
	 * Block until we know all outstanding shutdown jobs have completed.
	 * Even if we don't queue the job, flush the wq to be sure it's
	 * been released.
	 */
	flush_workqueue(vfio_irqfd_cleanup_wq);
}
EXPORT_SYMBOL_GPL(vfio_virqfd_disable);
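
/*
 * Matching teardown for the hypothetical sketch above: disabling with
 * the same slot both unhooks the eventfd and waits for any in-flight
 * shutdown work to finish.
 */
#if 0
static void my_disable_virqfd(struct my_device *mdev)
{
	vfio_virqfd_disable(&mdev->virqfd);
}
#endif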
module_init(vfio_virqfd_init);
module_exit(vfio_virqfd_exit);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);