// SPDX-License-Identifier: MIT

#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
#include <drm/drm_crtc.h>

#include "drm_internal.h"

/**
 * DOC: vblank works
 *
 * Many DRM drivers need to program hardware in a time-sensitive manner,
 * often with a deadline of starting and finishing within a certain region
 * of the scanout. Most of the time the safest way to accomplish this is to
 * simply do said time-sensitive programming in the driver's IRQ handler,
 * which allows drivers to avoid being preempted during these critical
 * regions. Or better still, the hardware may handle applying such
 * time-critical programming independently of the CPU.
 *
 * While there's a decent amount of hardware that's designed so that the CPU
 * doesn't need to be concerned with extremely time-sensitive programming,
 * there are a few situations where it can't be helped. Some unforgiving
 * hardware may require that certain time-sensitive programming be handled
 * completely by the CPU, and said programming may even take too long to
 * handle in an IRQ handler. Another such situation would be where the
 * driver needs to perform a task that must complete within a specific
 * scanout period, but might possibly block and thus cannot be handled in
 * an IRQ context. Neither of these situations can be solved perfectly in
 * Linux since we're not a realtime kernel, and thus the scheduler may
 * cause us to miss our deadline if it decides to preempt us. But for some
 * drivers, it's good enough if we can lower our chance of being preempted
 * to an absolute minimum.
 *
 * This is where &drm_vblank_work comes in. &drm_vblank_work provides a
 * simple generic delayed work implementation which delays work execution
 * until a particular vblank has passed, and then executes the work at
 * realtime priority. This provides the best possible chance at performing
 * time-sensitive hardware programming on time, even when the system is
 * under heavy load. &drm_vblank_work also supports rescheduling, so that
 * self re-arming work items can be easily implemented.
 */

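/*
 * A minimal usage sketch (an illustration, not code from a real driver):
 * assume a hypothetical struct my_crtc embedding a &struct drm_crtc and a
 * &struct drm_vblank_work; every "my_*" name below is made up.
 *
 *	struct my_crtc {
 *		struct drm_crtc base;
 *		struct drm_vblank_work vbl_work;
 *	};
 *
 * The work function runs on the CRTC's realtime worker thread once the
 * target vblank has passed, and unlike an IRQ handler it may block:
 *
 *	static void my_vblank_work_func(struct kthread_work *base)
 *	{
 *		struct drm_vblank_work *work = to_drm_vblank_work(base);
 *		struct my_crtc *my_crtc =
 *			container_of(work, struct my_crtc, vbl_work);
 *
 *		my_program_scanout_hw(my_crtc);
 *	}
 *
 * The item is bound to its CRTC once at init time, and can then be armed
 * against an absolute vblank count, typically from the commit path:
 *
 *	drm_vblank_work_init(&my_crtc->vbl_work, &my_crtc->base,
 *			     my_vblank_work_func);
 *
 *	drm_vblank_work_schedule(&my_crtc->vbl_work,
 *				 drm_crtc_vblank_count(&my_crtc->base) + 1,
 *				 true);
 */
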
/*
 * Called with &drm_device.event_lock held from the vblank IRQ path: hand any
 * pending work items whose target vblank count has passed over to the CRTC's
 * worker, dropping the vblank reference each of them held while waiting.
 */
void drm_handle_vblank_works(struct drm_vblank_crtc *vblank)
{
        struct drm_vblank_work *work, *next;
        u64 count = atomic64_read(&vblank->count);
        bool wake = false;

        assert_spin_locked(&vblank->dev->event_lock);

        list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
                if (!drm_vblank_passed(count, work->count))
                        continue;

                list_del_init(&work->node);
                drm_vblank_put(vblank->dev, vblank->pipe);
                kthread_queue_work(vblank->worker, &work->base);
                wake = true;
        }
        if (wake)
                wake_up_all(&vblank->work_wait_queue);
}

/* Handle cancelling any pending vblank work items and drop respective vblank
 * references in response to vblank interrupts being disabled.
 */
void drm_vblank_cancel_pending_works(struct drm_vblank_crtc *vblank)
{
        struct drm_vblank_work *work, *next;

        assert_spin_locked(&vblank->dev->event_lock);

        list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
                list_del_init(&work->node);
                drm_vblank_put(vblank->dev, vblank->pipe);
        }

        wake_up_all(&vblank->work_wait_queue);
}

/**
 * drm_vblank_work_schedule - schedule a vblank work
 * @work: vblank work to schedule
 * @count: target vblank count
 * @nextonmiss: defer until the next vblank if target vblank was missed
 *
 * Schedule @work for execution once the crtc vblank count reaches @count.
 *
 * If the crtc vblank count has already reached @count and @nextonmiss is
 * %false the work starts to execute immediately.
 *
 * If the crtc vblank count has already reached @count and @nextonmiss is
 * %true the work is deferred until the next vblank (as if @count has been
 * specified as crtc vblank count + 1).
 *
 * If @work is already scheduled, this function will reschedule said work
 * using the new @count. This can be used for self-rearming work items.
 *
 * Returns:
 * %1 if @work was successfully (re)scheduled, %0 if it was either already
 * scheduled or cancelled, or a negative error code on failure.
 */
int drm_vblank_work_schedule(struct drm_vblank_work *work,
                             u64 count, bool nextonmiss)
{
        struct drm_vblank_crtc *vblank = work->vblank;
        struct drm_device *dev = vblank->dev;
        u64 cur_vbl;
        unsigned long irqflags;
        bool passed, inmodeset, rescheduling = false, wake = false;
        int ret = 0;

        spin_lock_irqsave(&dev->event_lock, irqflags);
        if (work->cancelling)
                goto out;

        spin_lock(&dev->vbl_lock);
        inmodeset = vblank->inmodeset;
        spin_unlock(&dev->vbl_lock);
        if (inmodeset)
                goto out;

        if (list_empty(&work->node)) {
                ret = drm_vblank_get(dev, vblank->pipe);
                if (ret < 0)
                        goto out;
        } else if (work->count == count) {
                /* Already scheduled w/ same vbl count */
                goto out;
        } else {
                rescheduling = true;
        }

        work->count = count;
        cur_vbl = drm_vblank_count(dev, vblank->pipe);
        passed = drm_vblank_passed(cur_vbl, count);
        if (passed)
                drm_dbg_core(dev,
                             "crtc %d vblank %llu already passed (current %llu)\n",
                             vblank->pipe, count, cur_vbl);

        if (!nextonmiss && passed) {
                drm_vblank_put(dev, vblank->pipe);
                ret = kthread_queue_work(vblank->worker, &work->base);

                if (rescheduling) {
                        list_del_init(&work->node);
                        wake = true;
                }
        } else {
                if (!rescheduling)
                        list_add_tail(&work->node, &vblank->pending_work);
                ret = true;
        }

out:
        spin_unlock_irqrestore(&dev->event_lock, irqflags);
        if (wake)
                wake_up_all(&vblank->work_wait_queue);
        return ret;
}
EXPORT_SYMBOL(drm_vblank_work_schedule);

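/*
 * Since drm_vblank_work_schedule() may be called from within the work
 * function itself, self-rearming items need no extra machinery. A sketch,
 * reusing the hypothetical my_crtc from the example near the top of this
 * file:
 *
 *	static void my_rearming_work_func(struct kthread_work *base)
 *	{
 *		struct drm_vblank_work *work = to_drm_vblank_work(base);
 *		struct my_crtc *my_crtc =
 *			container_of(work, struct my_crtc, vbl_work);
 *
 *		my_program_scanout_hw(my_crtc);
 *
 *		drm_vblank_work_schedule(work,
 *					 drm_crtc_vblank_count(&my_crtc->base) + 1,
 *					 true);
 *	}
 *
 * Rescheduling quietly does nothing once drm_vblank_work_cancel_sync() has
 * started cancelling the item, which is what lets cancellation terminate
 * such loops.
 */
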
/**
 * drm_vblank_work_cancel_sync - cancel a vblank work and wait for it to
 * finish executing
 * @work: vblank work to cancel
 *
 * Cancel an already scheduled vblank work and wait for its
 * execution to finish.
 *
 * On return, @work is guaranteed to no longer be scheduled or running, even
 * if it's self-arming.
 *
 * Returns:
 * %true if the work was cancelled before it started to execute, %false
 * otherwise.
 */
bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work)
{
        struct drm_vblank_crtc *vblank = work->vblank;
        struct drm_device *dev = vblank->dev;
        bool ret = false;

        spin_lock_irq(&dev->event_lock);
        if (!list_empty(&work->node)) {
                list_del_init(&work->node);
                drm_vblank_put(vblank->dev, vblank->pipe);
                ret = true;
        }

        work->cancelling++;
        spin_unlock_irq(&dev->event_lock);

        wake_up_all(&vblank->work_wait_queue);

        if (kthread_cancel_work_sync(&work->base))
                ret = true;

        spin_lock_irq(&dev->event_lock);
        work->cancelling--;
        spin_unlock_irq(&dev->event_lock);

        return ret;
}
EXPORT_SYMBOL(drm_vblank_work_cancel_sync);

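/*
 * A typical caller is a CRTC disable path, which must guarantee that no work
 * item is still queued or running before vblanks are switched off. A sketch
 * under the same hypothetical my_crtc assumption (to_my_crtc() being a
 * made-up upcast helper):
 *
 *	static void my_crtc_atomic_disable(struct drm_crtc *crtc,
 *					   struct drm_atomic_state *state)
 *	{
 *		struct my_crtc *my_crtc = to_my_crtc(crtc);
 *
 *		drm_vblank_work_cancel_sync(&my_crtc->vbl_work);
 *		drm_crtc_vblank_off(crtc);
 *	}
 */
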
/**
 * drm_vblank_work_flush - wait for a scheduled vblank work to finish
 * executing
 * @work: vblank work to flush
 *
 * Wait until @work has finished executing once.
 */
void drm_vblank_work_flush(struct drm_vblank_work *work)
{
        struct drm_vblank_crtc *vblank = work->vblank;
        struct drm_device *dev = vblank->dev;

        spin_lock_irq(&dev->event_lock);
        wait_event_lock_irq(vblank->work_wait_queue, list_empty(&work->node),
                            dev->event_lock);
        spin_unlock_irq(&dev->event_lock);

        kthread_flush_work(&work->base);
}
EXPORT_SYMBOL(drm_vblank_work_flush);

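/*
 * In contrast to drm_vblank_work_cancel_sync(), flushing lets a pending item
 * run to completion instead of cancelling it. A sketch of a commit path that
 * must not signal completion until the deferred programming has actually
 * happened (same hypothetical my_crtc as above, target_vbl being whatever
 * absolute vblank count the work was armed against):
 *
 *	drm_vblank_work_schedule(&my_crtc->vbl_work, target_vbl, true);
 *
 *	(... remaining commit work ...)
 *
 *	drm_vblank_work_flush(&my_crtc->vbl_work);
 */
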
/**
 * drm_vblank_work_init - initialize a vblank work item
 * @work: vblank work item
 * @crtc: CRTC whose vblank will trigger the work execution
 * @func: work function to be executed
 *
 * Initialize a vblank work item for a specific crtc.
 */
void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
                          void (*func)(struct kthread_work *work))
{
        kthread_init_work(&work->base, func);
        INIT_LIST_HEAD(&work->node);

        work->vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
}
EXPORT_SYMBOL(drm_vblank_work_init);

/*
 * Create the per-CRTC vblank worker: a dedicated kthread worker whose task
 * runs at realtime (SCHED_FIFO) priority, so that queued vblank works have
 * the best possible chance of meeting their deadline.
 */
int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
{
        struct kthread_worker *worker;

        INIT_LIST_HEAD(&vblank->pending_work);
        init_waitqueue_head(&vblank->work_wait_queue);

        worker = kthread_create_worker(0, "card%d-crtc%d",
                                       vblank->dev->primary->index,
                                       vblank->pipe);
        if (IS_ERR(worker))
                return PTR_ERR(worker);

        vblank->worker = worker;

        sched_set_fifo(worker->task);
        return 0;
}