vkms_crtc.c

// SPDX-License-Identifier: GPL-2.0+

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vkms_drv.h"
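
/*
 * Hrtimer callback that stands in for the hardware vblank interrupt: it
 * fires once per frame period, signals the vblank to the DRM core and,
 * if the composer is enabled, records the frame interval and queues the
 * composer worker that computes the CRC for the frame. Returning
 * HRTIMER_RESTART keeps the timer periodic.
 */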
static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
{
        struct vkms_output *output = container_of(timer, struct vkms_output,
                                                  vblank_hrtimer);
        struct drm_crtc *crtc = &output->crtc;
        struct vkms_crtc_state *state;
        u64 ret_overrun;
        bool ret;

        ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
                                          output->period_ns);
        if (ret_overrun != 1)
                pr_warn("%s: vblank timer overrun\n", __func__);

        spin_lock(&output->lock);
        ret = drm_crtc_handle_vblank(crtc);
        if (!ret)
                DRM_ERROR("vkms failure on handling vblank");

        state = output->composer_state;
        spin_unlock(&output->lock);

        if (state && output->composer_enabled) {
                u64 frame = drm_crtc_accurate_vblank_count(crtc);

                /* update frame_start only if a queued vkms_composer_worker()
                 * has read the data
                 */
                spin_lock(&output->composer_lock);
                if (!state->crc_pending)
                        state->frame_start = frame;
                else
                        DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
                                         state->frame_start, frame);
                state->frame_end = frame;
                state->crc_pending = true;
                spin_unlock(&output->composer_lock);

                ret = queue_work(output->composer_workq, &state->composer_work);
                if (!ret)
                        DRM_DEBUG_DRIVER("Composer worker already queued\n");
        }

        return HRTIMER_RESTART;
}
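
/*
 * Vblank emulation is armed here: drm_calc_timestamping_constants() fills
 * in vblank->framedur_ns for the current mode, which then becomes the
 * period of the simulated vblank timer.
 */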
static int vkms_enable_vblank(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        unsigned int pipe = drm_crtc_index(crtc);
        struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
        struct vkms_output *out = drm_crtc_to_vkms_output(crtc);

        drm_calc_timestamping_constants(crtc, &crtc->mode);

        hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        out->vblank_hrtimer.function = &vkms_vblank_simulate;
        out->period_ns = ktime_set(0, vblank->framedur_ns);
        hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);

        return 0;
}
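
/* hrtimer_cancel() also waits for a callback that is already running. */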
static void vkms_disable_vblank(struct drm_crtc *crtc)
{
        struct vkms_output *out = drm_crtc_to_vkms_output(crtc);

        hrtimer_cancel(&out->vblank_hrtimer);
}
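
/*
 * There is no scanout position to sample, so the timestamp is
 * reconstructed from the emulated vblank hrtimer instead (see the
 * comment in the body for why it must be corrected by one frame).
 */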
static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
                                      int *max_error, ktime_t *vblank_time,
                                      bool in_vblank_irq)
{
        struct drm_device *dev = crtc->dev;
        unsigned int pipe = crtc->index;
        struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
        struct vkms_output *output = &vkmsdev->output;
        struct drm_vblank_crtc *vblank = &dev->vblank[pipe];

        if (!READ_ONCE(vblank->enabled)) {
                *vblank_time = ktime_get();
                return true;
        }

        *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);

        if (WARN_ON(*vblank_time == vblank->time))
                return true;

        /*
         * To prevent races we roll the hrtimer forward before we do any
         * interrupt processing - this is how real hw works (the interrupt is
         * only generated after all the vblank registers are updated) and what
         * the vblank core expects. Therefore we need to always correct the
         * timestamp by one frame.
         */
        *vblank_time -= output->period_ns;

        return true;
}
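
/*
 * vkms_crtc_state subclasses drm_crtc_state with the composer work item
 * and CRC bookkeeping, so the duplicate/destroy/reset hooks below manage
 * those extra fields around the __drm_atomic_helper_crtc_*() helpers.
 */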
static struct drm_crtc_state *
vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
{
        struct vkms_crtc_state *vkms_state;

        if (WARN_ON(!crtc->state))
                return NULL;

        vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
        if (!vkms_state)
                return NULL;

        __drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base);

        INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);

        return &vkms_state->base;
}
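
/*
 * A queued composer worker still uses this state, so it must not be
 * pending when the state is freed; the WARN_ON below catches that.
 */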
static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc,
                                           struct drm_crtc_state *state)
{
        struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);

        __drm_atomic_helper_crtc_destroy_state(state);

        WARN_ON(work_pending(&vkms_state->composer_work));
        kfree(vkms_state->active_planes);
        kfree(vkms_state);
}
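
/*
 * Note: if the allocation fails, &vkms_state->base evaluates to NULL
 * (base is the first member of vkms_crtc_state), and
 * __drm_atomic_helper_crtc_reset() tolerates a NULL state, so the CRTC
 * is left without a state rather than crashing here.
 */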
static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
{
        struct vkms_crtc_state *vkms_state =
                kzalloc(sizeof(*vkms_state), GFP_KERNEL);

        if (crtc->state)
                vkms_atomic_crtc_destroy_state(crtc, crtc->state);

        __drm_atomic_helper_crtc_reset(crtc, &vkms_state->base);
        if (vkms_state)
                INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
}
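
/*
 * Core CRTC hooks: the atomic helpers, the vblank emulation above, and
 * the CRC source callbacks exported from the composer code.
 */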
static const struct drm_crtc_funcs vkms_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .destroy = drm_crtc_cleanup,
        .page_flip = drm_atomic_helper_page_flip,
        .reset = vkms_atomic_crtc_reset,
        .atomic_duplicate_state = vkms_atomic_crtc_duplicate_state,
        .atomic_destroy_state = vkms_atomic_crtc_destroy_state,
        .enable_vblank = vkms_enable_vblank,
        .disable_vblank = vkms_disable_vblank,
        .get_vblank_timestamp = vkms_get_vblank_timestamp,
        .get_crc_sources = vkms_get_crc_sources,
        .set_crc_source = vkms_set_crc_source,
        .verify_crc_source = vkms_verify_crc_source,
};
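
/*
 * Cache the visible planes of this commit in the CRTC state so that the
 * composer worker can walk them later without touching atomic state.
 */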
static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
                                  struct drm_crtc_state *state)
{
        struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);
        struct drm_plane *plane;
        struct drm_plane_state *plane_state;
        int i = 0, ret;

        if (vkms_state->active_planes)
                return 0;

        ret = drm_atomic_add_affected_planes(state->state, crtc);
        if (ret < 0)
                return ret;

        drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) {
                plane_state = drm_atomic_get_existing_plane_state(state->state,
                                                                  plane);
                WARN_ON(!plane_state);

                if (!plane_state->visible)
                        continue;

                i++;
        }

        vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL);
        if (!vkms_state->active_planes)
                return -ENOMEM;
        vkms_state->num_active_planes = i;

        i = 0;
        drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) {
                plane_state = drm_atomic_get_existing_plane_state(state->state,
                                                                  plane);

                if (!plane_state->visible)
                        continue;

                vkms_state->active_planes[i++] =
                        to_vkms_plane_state(plane_state);
        }

        return 0;
}
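
/* Vblank emulation simply follows the CRTC enable/disable state. */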
static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
                                    struct drm_crtc_state *old_state)
{
        drm_crtc_vblank_on(crtc);
}

static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
                                     struct drm_crtc_state *old_state)
{
        drm_crtc_vblank_off(crtc);
}
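
/*
 * atomic_begin and atomic_flush deliberately bracket the commit with
 * output->lock: the lock taken below is released at the end of
 * vkms_crtc_atomic_flush(), so the vblank timer cannot sample
 * composer_state while it is being updated.
 */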
static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
                                   struct drm_crtc_state *old_crtc_state)
{
        struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

        /* This lock is held across the atomic commit to block the vblank
         * timer from scheduling vkms_composer_worker until the composer is
         * updated
         */
        spin_lock_irq(&vkms_output->lock);
}

static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
                                   struct drm_crtc_state *old_crtc_state)
{
        struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

        if (crtc->state->event) {
                spin_lock(&crtc->dev->event_lock);

                if (drm_crtc_vblank_get(crtc) != 0)
                        drm_crtc_send_vblank_event(crtc, crtc->state->event);
                else
                        drm_crtc_arm_vblank_event(crtc, crtc->state->event);

                spin_unlock(&crtc->dev->event_lock);

                crtc->state->event = NULL;
        }

        vkms_output->composer_state = to_vkms_crtc_state(crtc->state);

        spin_unlock_irq(&vkms_output->lock);
}

static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
        .atomic_check = vkms_crtc_atomic_check,
        .atomic_begin = vkms_crtc_atomic_begin,
        .atomic_flush = vkms_crtc_atomic_flush,
        .atomic_enable = vkms_crtc_atomic_enable,
        .atomic_disable = vkms_crtc_atomic_disable,
};
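
/*
 * An ordered workqueue runs at most one composer job at a time, in
 * submission order, so frames are composed in the order they were
 * queued by the vblank timer.
 */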
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
                   struct drm_plane *primary, struct drm_plane *cursor)
{
        struct vkms_output *vkms_out = drm_crtc_to_vkms_output(crtc);
        int ret;

        ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
                                        &vkms_crtc_funcs, NULL);
        if (ret) {
                DRM_ERROR("Failed to init CRTC\n");
                return ret;
        }

        drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);

        spin_lock_init(&vkms_out->lock);
        spin_lock_init(&vkms_out->composer_lock);

        vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
        if (!vkms_out->composer_workq)
                return -ENOMEM;

        return ret;
}